blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03f52294b6ae8c54a9f097c5e9a3df1f9bdb0115 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pfnet_chainer/chainer-master/tests/chainer_tests/links_tests/connection_tests/test_deconvolution_nd.py | 5ba25749e626b7c36370304d1ad40afe3db55bb1 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,199 | py | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import initializers
from chainer.links import deconvolution_nd
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
@parameterize(*testing.product({
    'dims': [(5, 4, 3), (4, 3), (3,)],
    'nobias': [True, False],
    'dtype': [numpy.float32],
    'use_cudnn': [True, False],
    'used_outsize': ['case1', 'case2', 'None'],
}) + testing.product({
    'dims': [(5, 4, 3)],
    'nobias': [False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'use_cudnn': [True],
    'used_outsize': ['None'],
}))
class TestDeconvolutionND(unittest.TestCase):
    """Parameterized tests for the N-dimensional deconvolution link.

    The first product varies spatial dimensionality, bias, cuDNN use and
    outsize handling; the second varies dtype only (3-D, biased, cuDNN).
    """
    def setUp(self):
        """Build a DeconvolutionND link plus random input/gradient arrays."""
        N = 2
        in_channels = 3
        out_channels = 2
        ndim = len(self.dims)
        ksize = (3,) * ndim
        stride = (2,) * ndim
        pad = (1,) * ndim
        if self.used_outsize == 'case1' or self.used_outsize == 'None':
            # Use output size determined with get_deconv_outsize.
            outs = tuple(
                conv.get_deconv_outsize(d, k, s, p)
                for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
        elif self.used_outsize == 'case2':
            # Use possible output size other than the one determined with
            # get_deconv_outsize.
            outs = tuple(
                conv.get_deconv_outsize(d, k, s, p) + 1
                for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
        if self.used_outsize != 'None':
            outsize = outs
        else:
            outsize = None
        if not self.nobias:
            initial_bias = initializers.Uniform(scale=1, dtype=self.dtype)
        else:
            initial_bias = None
        self.link = deconvolution_nd.DeconvolutionND(
            ndim, in_channels, out_channels, ksize, stride=stride, pad=pad,
            outsize=outsize, initial_bias=initial_bias)
        self.link.cleargrads()
        x_shape = (N, in_channels) + self.dims
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
        # gy always uses the size actually produced (outs), even when
        # outsize was forced to None above.
        gy_shape = (N, out_channels) + outs
        self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
        self.check_forward_options = {}
        self.check_backward_options = {
            'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            # float16 needs much looser numerical tolerances.
            self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
            self.check_backward_options = {
                'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1}
    def check_forward_consistency(self, link, x_data):
        """Assert CPU and GPU forward passes agree within tolerance."""
        x_cpu = chainer.Variable(x_data)
        y_cpu = link(x_cpu)
        self.assertEqual(y_cpu.data.dtype, x_data.dtype)
        link.to_gpu()
        x_gpu = chainer.Variable(cuda.to_gpu(x_data))
        y_gpu = link(x_gpu)
        self.assertEqual(y_gpu.data.dtype, x_data.dtype)
        testing.assert_allclose(
            y_cpu.data, y_gpu.data, **self.check_forward_options)
    @attr.gpu
    @condition.retry(3)
    def test_forward_consistency(self):
        """Forward CPU/GPU consistency (retried because inputs are random)."""
        self.link.use_cudnn = self.use_cudnn
        self.check_forward_consistency(self.link, self.x)
    def check_backward(self, link, x_data, y_grad):
        """Run numerical gradient check on W (and b unless nobias)."""
        params = [link.W]
        if not self.nobias:
            params.append(link.b)
        gradient_check.check_backward(
            link, x_data, y_grad, params, **self.check_backward_options)
    @condition.retry(3)
    def test_backward_cpu(self):
        """Gradient check on CPU."""
        self.check_backward(self.link, self.x, self.gy)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        """Gradient check on GPU."""
        self.link.use_cudnn = self.use_cudnn
        self.link.to_gpu()
        self.check_backward(
            self.link, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestDeconvolutionNDNoInitialBias(unittest.TestCase):
    """Check that passing initial_bias=None yields a bias-less link."""
    def test_no_initial_bias(self):
        """The link's b parameter must be None when no initial bias is given."""
        ndim = 3
        ksize = 3
        link = deconvolution_nd.DeconvolutionND(
            ndim, 3, 2, ksize, initial_bias=None)
        self.assertIsNone(link.b)
testing.run_module(__name__, __file__)
| [
"659338505@qq.com"
] | 659338505@qq.com |
f4b7c388edf75b83c3b1a73d48fcbd7b0f0cb3be | e25c3d29713a508ba2f4b76f6416b8f260429723 | /utils.py | 8c8e245b8f132c77a6e6ff940c97de33870d6e4e | [] | no_license | EaseCloud/face-api | 7c22219a8cdf65b408a369cfba6ac2930462889b | 0fff8ab7d74f1b0c669ca1bac8efbc01f13867be | refs/heads/master | 2020-03-10T10:13:46.774712 | 2018-04-17T03:29:25 | 2018-04-17T03:29:25 | 129,328,632 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,756 | py | import json
import os
import pickle
import sys
import face_recognition
from bottle import Response
import config
def read_data(group_id):
    """Read the face encodings of a specific group from the repository.

    Creates an empty repository file for the group on first access.

    :param group_id: name of the group; used as the file name under
        ``config.DIR_DATA``.
    :return: dict mapping face_id -> face encoding.
    """
    path = os.path.join(config.DIR_DATA, group_id)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if not os.path.exists(path):
        # Initialize a new, empty repository for this group.
        # Context managers close the handles deterministically (the
        # previous version left the files dangling open).
        with open(path, 'wb') as fh:
            pickle.dump({}, fh)
    # NOTE: pickle is only safe here because the repository files are
    # written by this service itself; never unpickle untrusted data.
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def write_data(group_id, face_id, face):
    """Write a face to a group repository.

    :param group_id: group whose repository file is updated.
    :param face_id: identifier the encoding is stored under.
    :param face: face encoding to persist.
    :return: None
    """
    data = read_data(group_id)
    data[face_id] = face
    path = os.path.join(config.DIR_DATA, group_id)
    # Use a context manager so the file is flushed and closed
    # deterministically (the previous version never closed the handle).
    with open(path, 'wb') as fh:
        pickle.dump(data, fh)
def make_response(msg='', ok=True, data=None):
    """Build a JSON bottle Response and log the outcome to stderr.

    :param msg: human-readable status message.
    :param ok: True for a 200 response, False for a 400 response.
    :param data: optional payload; included (and logged) only if truthy.
    :return: bottle Response with a JSON body.
    """
    status_word = 'Success' if ok else 'Fail'
    print('[{}] {}'.format(status_word, msg), file=sys.stderr)
    payload = {'ok': ok, 'msg': msg}
    if data:
        print(data, file=sys.stderr)
        payload['data'] = data
    body = json.dumps(payload)
    status_code = 200 if ok else 400
    headers = {'Content-Type': 'application/json'}
    return Response(body, status=status_code, headers=headers)
def upload(group_id, face_id, path):
    """Register the single face found in the image at *path*.

    Fails (HTTP 400) when the picture contains zero faces or more than
    one face; otherwise stores the encoding under *face_id* in the group.
    """
    # Parse the faces from the uploaded file
    image = face_recognition.load_image_file(path)
    encodings = face_recognition.api.face_encodings(image, num_jitters=config.JITTERS)
    if not encodings:
        return make_response('No face detected.', False)
    if len(encodings) > 1:
        return make_response('More than one face detected.', False)
    write_data(group_id, face_id, encodings[0])
    success_msg = 'Upload success: group_id={} face_id={}'.format(group_id, face_id)
    return make_response(success_msg)
def recognize(group_id, path, keys=None):
    """Match the faces in the image at *path* against a group repository.

    :param group_id: group whose stored encodings are searched.
    :param path: path to the uploaded image file.
    :param keys: optional iterable of face_ids to restrict the search to;
        falsy means "match against every stored face".
    :return: JSON response with the list of matched face_ids, or a
        400 response when nothing matches / too many faces are present.
    """
    names = []
    faces = []
    # Collect candidate encodings, optionally filtered by the given keys.
    for key, value in read_data(group_id).items():
        if not keys or key in keys:
            names.append(key)
            faces.append(value)
    # Parse the faces in the uploaded image
    image = face_recognition.load_image_file(path)
    matches = set()
    upload_faces = face_recognition.api.face_encodings(image, num_jitters=config.JITTERS)
    print('{} faces detected in the picture'.format(len(upload_faces)), file=sys.stderr)
    # Hard limit: refuse crowded pictures to keep comparison cost bounded.
    if len(upload_faces) > 4:
        return make_response('Too many faces in the picture', False)
    # Recognize the faces: one comparison pass per face in the picture.
    for face in upload_faces:
        results = face_recognition.compare_faces(faces, face, config.TOLERANCE)
        for name, success in zip(names, results):
            if success:
                matches.add(name)
    # Response
    if matches:
        return make_response('Matched {} faces.'.format(len(matches)), data=list(matches))
    else:
        return make_response('No matches.', False)
| [
"57082212@qq.com"
] | 57082212@qq.com |
d82167ca61a739e2d8c6919137e144a987ee22a3 | 70922de165319283d640821fd42ea1806da402c0 | /math/0x00-linear_algebra/8-ridin_bareback.py | 99e94b246997a78ccbb5d33ce5396ebf2ac47d12 | [] | no_license | ikki2530/holbertonschool-machine_learning | bdd8152d575a99281e2cce105cf87442ec07f2fb | 0b56aa0e92d65d4a5832cc994769834fbcfbe0ac | refs/heads/main | 2023-07-07T00:49:03.675328 | 2021-08-11T10:27:56 | 2021-08-11T10:27:56 | 317,352,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/env python3
"""multiply 2 matrices"""
def matrix_shape(matrix):
    """
    matrix: (possibly nested) list whose shape is calculated
    Return: a list with the matrix shape [n, m, ...], where each entry is
    the length of one nesting level (e.g. [rows, columns] for a 2D
    matrix). A non-list (scalar) input yields [].
    """
    # Walk down the first element of each nesting level; every level
    # contributes one dimension.  isinstance() is the idiomatic type
    # check, and iteration avoids the quadratic list concatenation of
    # the previous recursive version.
    shape = []
    while isinstance(matrix, list):
        shape.append(len(matrix))
        if not matrix:
            # Empty dimension: nothing deeper to descend into
            # (previously this case crashed with IndexError).
            break
        matrix = matrix[0]
    return shape
def mat_mul(mat1, mat2):
    """
    Description: performs matrix multiplication of two 2D matrices
    Returns: a new matrix with the result of the multiplication
    if the inner dimensions agree, None otherwise
    """
    # The product is defined only when mat1's column count equals mat2's
    # row count; checking the two inner lengths directly avoids computing
    # the full shapes of both operands.
    if len(mat1[0]) != len(mat2):
        return None
    # zip(*mat2) iterates the columns of mat2; each output entry is the
    # dot product of a row of mat1 with a column of mat2.
    return [
        [sum(a * b for a, b in zip(row, col)) for col in zip(*mat2)]
        for row in mat1
    ]
| [
"dagomez2530@gmail.com"
] | dagomez2530@gmail.com |
55bf4b302ed6ffdbe5175081e2ee071bd0c2e622 | 39d9ba65172cb170eab158ce732748f36eb5da02 | /dquora/messager/models.py | d9dddae4e288e02133b0cca5084d77be7ed5fc3a | [
"MIT"
] | permissive | adivxkk/Dquora | c3fec218922c33caebdf45211e63fa88e9e83f8b | 8e9f910eaab9fd109286572fd65b0918d93f83b9 | refs/heads/main | 2023-07-16T20:40:52.156804 | 2021-08-21T11:12:24 | 2021-08-21T11:12:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | py | import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.conf import settings
class MessageQuerySet(models.query.QuerySet):
    """Custom QuerySet API for the Message model."""
    def get_conversation(self, sender, recipient):
        """Return the private-message conversation between two users."""
        qs_one = self.filter(sender=sender, recipient=recipient)  # messages A sent to B
        qs_two = self.filter(sender=recipient, recipient=sender)  # messages B sent to A
        return qs_one.union(qs_two).order_by('created_at')  # union, sorted by time; result is a QuerySet
    def get_most_recent_conversation(self, recipient):
        """Return the user who most recently exchanged a message with *recipient*."""
        try:
            qs_sent = self.filter(sender=recipient)  # messages sent by the current user
            qs_received = self.filter(recipient=recipient)  # messages received by the current user
            qs = qs_sent.union(qs_received).latest("created_at")  # the most recent message
            if qs.sender == recipient:
                # The current user sent the latest message: return its recipient.
                return qs.recipient
            # Otherwise return the sender of the latest message.
            return qs.sender
        except self.model.DoesNotExist:
            # No messages exist at all: fall back to the user themself.
            return get_user_model().objects.get(username=recipient.username)
class Message(models.Model):
    """A private message exchanged between two users."""
    uuid_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_messages', blank=True, null=True,
                               on_delete=models.SET_NULL, verbose_name='发送者')
    recipient = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='received_messages', blank=True, null=True,
                                  on_delete=models.SET_NULL, verbose_name='接受者')
    message = models.TextField(blank=True, null=True, verbose_name='内容')
    unread = models.BooleanField(default=True, verbose_name='是否未读')  # True means unread
    created_at = models.DateTimeField(db_index=True, auto_now_add=True,
                                      verbose_name='创建时间')  # no updated_at: messages cannot be edited or recalled once sent
    objects = MessageQuerySet.as_manager()
    class Meta:
        verbose_name = '私信'
        verbose_name_plural = verbose_name
        ordering = ('-created_at',)
    def __str__(self):
        return self.message
    def mark_as_read(self):
        """Flag this message as read; saves only when the flag changes."""
        if self.unread:
            self.unread = False
            self.save()
| [
"xlyjxkk@gmail.com"
] | xlyjxkk@gmail.com |
0d5b9fa22ce5bdb993ac375103383a9c6ba6c959 | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/2/library-tests/modules/general/package/__init__.py | d5be97cd18230f5a4a71a5e875574face58adfb1 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 213 | py |
a = 1
b = 2
c = 3
#Implicit relative import
from helper import d
#Explicit relative import
from .helper import g
from .assistant import f
#This will be an implicit relative import (in Python 2)
import helper
| [
"mark@hotpy.org"
] | mark@hotpy.org |
ac83d2dffda56e4aa58e5269ad131cf6fc0edd88 | cc9a87e975546e2ee2957039cceffcb795850d4f | /venv/bin/pip | 5d0b1e4937781dd43907661fd781a3862fbd0277 | [] | no_license | CodeHunterDev/Belajar-Python | 304d3243801b91b3605d2b9bd09e49a30735e51b | 9dd2ffb556eed6b2540da19c5f206fedb218ae99 | refs/heads/master | 2023-03-19T22:12:46.330272 | 2020-02-04T08:02:00 | 2020-02-04T08:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | #!/home/adam/PyCharm/HelloAdamPython/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
# Copyright (c) 2020. Adam Arthur Faizal
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"adam.faizal.af6@gmail.com"
] | adam.faizal.af6@gmail.com | |
33762bb875c6b2c49a26c4748aa607e6b82072fc | 5f203dc298a40d47835882b9c3b37e93ebc015d6 | /mf/python/mknfold.py | 22c45920a9b53331b02fad5295461d2269f483b1 | [] | no_license | chan-y-park/ML-SGHMC | 6489e7f2808b849983d2b970bc2c471a61cd8a3f | 92d555b7360060eb452efd72e4493dac39412021 | refs/heads/master | 2020-05-07T18:18:13.601213 | 2019-04-15T06:58:20 | 2019-04-15T06:58:20 | 180,761,444 | 0 | 0 | null | 2019-04-11T09:41:04 | 2019-04-11T09:41:04 | null | UTF-8 | Python | false | false | 798 | py | #!/usr/bin/python
import sys
import random
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'usage: <input> <fold> [nfold=5]'
exit(-1)
if len( sys.argv ) > 3:
nfold = int( sys.argv[3] )
else:
nfold = 5
fold = int( sys.argv[2] )
assert fold > 0 and fold <= nfold
random.seed( 0 )
fo = open( 'fold%d.txt' % fold, 'w' )
for l in open( sys.argv[1] ):
arr = l.split()
uid,iid, sc = int(arr[0]),int(arr[1]), float(arr[2])
if random.randint( 1, nfold ) == fold:
# test is 1
ngf = 1
else:
ngf = 0
fo.write('%f\t%d\t1\t1\t' % (sc, ngf ) )
if ngf != 0:
fo.write('0:0 ')
fo.write('%d:1 %d:1\n' %(uid,iid))
fo.close()
| [
"tianqi.tchen@gmail.com"
] | tianqi.tchen@gmail.com |
12e1b5d0adfaa938385583586815b054f90b1494 | 6c92a0d1cf4e79a1f7a5d883184b397625e957be | /Day048/Cookie Clicker Practice.py | 880b23f9cad787f17b60b491ebd65386da0da3ef | [] | no_license | Hasib104/Learning-Python | 5667c52e98812da36275412a481298f4b38f8099 | 0f731624fb6572fdfbb0d09d2aa1ffb3d3247796 | refs/heads/main | 2023-03-26T18:33:35.168018 | 2021-03-18T13:25:36 | 2021-03-18T13:25:36 | 327,512,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py |
from selenium import webdriver
import time
chrome_driver_path = "C:\Development\chromedriver.exe"
driver = webdriver.Chrome(chrome_driver_path)
driver.get("http://orteil.dashnet.org/experiments/cookie/")
# Get cookie to click on.
cookie = driver.find_element_by_id("cookie")
# Get upgrade item ids.
items = driver.find_elements_by_css_selector("#store div")
item_ids = [item.get_attribute("id") for item in items]
#print(item_ids)
timeout = time.time() + 3
five_min = time.time() + 60 * 5
while True:
cookie.click()
#finding money and making is int()
money = driver.find_element_by_css_selector("#game #money")
money_text_int = int(money.text)
#print(money_text_int)
prices = []
if time.time() > timeout:
#finding all the prices for upgrades
finding_all_prices_tag = driver.find_elements_by_css_selector("#store b")
#splitting the prices from names
for price in finding_all_prices_tag:
all_prices_tag_text = price.text
if all_prices_tag_text != "":
just_price_int = int(all_prices_tag_text.split("-")[1].strip().replace(",", ""))
prices.append(just_price_int)
#print(prices)
#making a dictionary for upgrades price : id
cookie_upgrades = {}
for i in range(len(prices)):
cookie_upgrades[prices[i]] = item_ids[i]
#print(cookie_upgrades)
#making a dictionary for affordable upgrades
# affordable_upgrades = {}
# for cost, id in cookie_upgrades.items():
# if money_text_int > cost:
# affordable_upgrades[cost] = id
# #print(affordable_upgrades)
affordable_upgrades = {cost:id for (cost,id) in cookie_upgrades.items() if money_text_int > cost}
#buying the highest upgrade
highest_upgrade = max(affordable_upgrades)
highest_upgrade_id = affordable_upgrades[highest_upgrade]
driver.find_element_by_id(highest_upgrade_id).click()
#adding a timeout so that the code doesnt crash(highest_upgrade = max(affordable_upgrades) \nValueError: max() arg is an empty sequence), this helps the game's score to increase.
timeout = time.time() + 3
if time.time() > five_min:
cps = driver.find_element_by_css_selector("#game #cps").text
print(cps)
break
| [
"noreply@github.com"
] | Hasib104.noreply@github.com |
fdc4ec77c6a13586c430aa41fa72288ca3ad27f2 | 07e3e716cd5ae33f5c88c40ede090645a723db9f | /backend/home/migrations/0002_load_initial_data.py | 7626e1b98d0c44979567332c51672739b0aef63f | [] | no_license | crowdbotics-apps/dating-22486 | bb8886a455d581b0c27cef8395c8369bc85fae72 | 1bb68c3095ffc29e5cd330cf426b9b1e35c2c360 | refs/heads/master | 2023-01-06T16:43:18.990380 | 2020-11-10T19:00:28 | 2020-11-10T19:00:28 | 311,755,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "dating"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">dating</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "dating-22486.botics.co"
site_params = {
"name": "dating",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4752f1bf9ae6ca9a6d4829ac7312e28b36909a17 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/TileCalorimeter/TileConditions/share/convertTimingResiduals.py | a83e68743fe1d4bb285589af690757802de92f78 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | #!/usr/bin/env python
import sys
#
#__________________________________________________________________
def dec2hex(n):
    """Format *n* as upper-case hex, zero-padded, keeping the last two digits."""
    padded = "0%X" % n
    return padded[-2:]
#
#__________________________________________________________________
def pmt2channel_LB(pmtOrChannel):
    """
    Convert a channel number to a PMT number or vice versa (the mapping
    is its own inverse).
    Valid for Barrel only!
    """
    number = int(pmtOrChannel)
    # Numbers 0 - 23 map onto themselves.
    if number <= 23:
        return number
    # Numbers 24 - 47 are reversed within each group of three: the first
    # and last members of a triplet swap, the middle one stays put.
    remainder = number % 3
    if remainder == 0:
        return number + 2
    if remainder == 2:
        return number - 2
    return number
#
#__________________________________________________________________
# Map the module-name prefix to the hex fragment identifier used in the
# Tile conditions files.  NOTE: this is Python 2 code (print statements,
# xrange) and will not run unmodified under Python 3.
modToFrag = {'LBA' : '0x1',
             'LBC' : '0x2',
             'EBA' : '0x3',
             'EBC' : '0x4'}
inFile = sys.argv[1]
tdlas = ""
tclas = ""
lines = open(inFile,"r").readlines()
for line in lines:
    # Expected input format per line: <module> <drawerOffset> <48 channel offsets>
    field = line.split()
    if not len(field): continue
    module = field[0][:3]
    modNum = int(field[0][3:]) - 1
    drawerOffset = float(field[1])
    chanOffsets = field[2:]
    hexModule = modToFrag[module] + dec2hex(modNum)
    #=== some sanity checks
    # NOTE(review): 'sum' shadows the builtin of the same name here.
    sum=0.
    for chan in xrange(6):
        add = float(chanOffsets[chan])
        sum += add
    print "%s ---> Found %i channels, sum of first 6 is %f" % ( field[0] , len(chanOffsets) , sum )
    #====================================================
    #=== fill tdlas (channel is always 0)
    #====================================================
    tdlas = tdlas+ "Tdlas\t%s\t0\t%s\n" % (hexModule,drawerOffset)
    #====================================================
    #=== fill tclas (one line per channel; same value for both columns)
    #====================================================
    for chan in xrange(48):
        offset = chanOffsets[chan]
        tclas = tclas+"Tclas\t%s\t%i\t%s\t%s\n" % (hexModule,chan,offset,offset)
# Write the accumulated records to the two conditions files.
tdlasFile = open("Tile.tdlas","w")
tdlasFile.write(tdlas)
tdlasFile.close()
tclasFile = open("Tile.tclas","w")
tclasFile.write(tclas)
tclasFile.close()
# print "---------------TDLAS--------------------------"
# print tdlas
# print "---------------TDLAS--------------------------"
# print "---------------TCLAS--------------------------"
# print tclas
# print "---------------TCLAS--------------------------"
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
cfa0e6c206169e8e5fed6ab2f5e5178f1c1e0c4f | fa52ee094776f69f0214ffbe24281a9736eaaa40 | /solutions/114_Flatten_Binary_Tree_to_Linked_List_1.py | b41a2f5011b771075bb35a38a3f043abb3f56591 | [] | no_license | hank08tw/CodeFromLeetcode | 57898916c2b903b1ecbc3d0ed063b36531d74e93 | 41b2936600dd392627a4f6e146559739bb88da45 | refs/heads/master | 2021-05-03T04:26:28.239904 | 2015-06-07T17:31:06 | 2015-06-07T17:31:06 | 120,615,373 | 1 | 0 | null | 2018-02-07T12:58:40 | 2018-02-07T12:58:39 | null | UTF-8 | Python | false | false | 615 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {void} Do not return anything, modify root in-place instead.
def flatten(self, root):
stack = []
p = root
while p != None or stack:
if p.right:
stack.append(p.right)
if p.left:
p.right = p.left
p.left = None
elif stack:
p.right = stack.pop()
p = p.right | [
"yao.zhao9101@gmail.com"
] | yao.zhao9101@gmail.com |
3f2546636e7e7b9abb20725f5c413bb9d72a55f2 | e27d6cf969bc1e12f61fcf09aa5cab211744e352 | /TerrainData/classifyNB.py | 1ffa57f0132f34baa276949992356aea8eb34850 | [] | no_license | bnajafi/Gaussian_Naive_Bayes_SKLearn | 6fd2ed0e43020ca86f4c8ca8946c630b0dd719ba | d064ec4665e35d178960a3afd2fade02fb7ba118 | refs/heads/master | 2021-01-20T20:47:32.812728 | 2017-06-12T11:04:54 | 2017-06-12T11:04:54 | 64,767,625 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | def classify(features_train, labels_train):
### import the sklearn module for GaussianNB
from sklearn.naive_bayes import GaussianNB
### create classifier
clf= GaussianNB()
### fit the classifier on the training features and labels
clf.fit(features_train,labels_train)
### return the fit classifier
return clf
### your code goes here!
| [
"behzad najafi"
] | behzad najafi |
14961a035531cf4e9da1f2cb7f06758d31a3e389 | f33808410c0ad3a16099a81a130b8e2c25d4e641 | /modules/implant/persist/wmi.py | ef97800b0a0e03e908338e62b90d11b3e0725904 | [
"Apache-2.0"
] | permissive | marciopocebon/entypreter | 96994ec6a6e35f4b31bf1b16aeff29b75c7b5bc3 | 6a165589c4853c33e7f5eb6fd3a1326bfc37870e | refs/heads/master | 2020-09-05T06:54:31.674617 | 2019-11-05T13:48:35 | 2019-11-05T13:48:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,937 | py | import core.job
import core.implant
import core.loader
import uuid
import string
import random
class WMIPersistJob(core.job.Job):
def create(self):
if self.session_id == -1:
self.error("0", "This job is not yet compatible with ONESHOT stagers.", "ONESHOT job error.", "")
return False
if self.session.elevated != 1:
self.error("0", "This job requires an elevated session.", "Not elevated!", "")
return False
id = self.options.get("PAYLOAD")
payload = self.load_payload(id)
self.options.set("CMD", payload)
self.options.set("DIRECTORY", self.options.get('DIRECTORY').replace("\\", "\\\\").replace('"', '\\"'))
self.options.set("FDROPDIR", self.options.get('DROPDIR').replace("\\", "\\\\").replace('"', '\\"'))
if self.options.get('DROPFILE'):
self.options.set('FDROPFILE', self.options.get('DROPFILE')+'.hta')
else:
self.options.set('DROPFILE', ''.join(random.choice(string.ascii_uppercase) for _ in range(10)))
self.options.set('FDROPFILE', self.options.get('DROPFILE')+'.hta')
def report(self, handler, data, sanitize = False):
task = handler.get_header("Task", False)
upload = handler.get_header('X-UploadFileJob', False)
if upload == "true":
dropper_script = core.loader.load_script(self.options.get("LDROPFILE"), self.options)
template = core.loader.load_script("data/stager/js/mshta/template.hta")
fdata = handler.post_process_script(dropper_script, template, self.options, self.session, False)
headers = {}
headers['Content-Type'] = 'application/octet-stream'
headers['Content-Length'] = len(fdata)
handler.reply(200, fdata, headers)
return
data = data.decode()
if task == "CreateFilter":
handler.reply(200)
if data:
self.shell.print_good("__EventFilter created!")
self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __EventFilter WHERE Name=\"Entypreter\" DELETE")
else:
self.shell.print_error("__EventFilter could not be created, this implant will probably fail :/")
return
if task == "CreateConsumer":
handler.reply(200)
if data:
self.shell.print_good("CommandLineEventConsumer created!")
self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH CommandLineEventConsumer WHERE Name=\"Entypreter\" DELETE")
else:
self.shell.print_error("CommandLineEventConsumer could not be created, this implant will probably fail :/")
return
if task == "CreateBinding":
handler.reply(200)
if data:
self.shell.print_good("__FilterToConsumerBinding created! Persistence has been established! If the target reboots, a session should come back 4-5 minutes later :)")
self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __FilterToConsumerBinding WHERE \"__PATH like '%Entypreter%'\" DELETE")
else:
self.shell.print_error("__FilterToConsumerBinding could not be created, this implant will probably fail :/")
return
if task == "RemovePersistence":
handler.reply(200)
if data:
self.shell.print_good("Persistence removed successfully.")
else:
self.shell.print_error("Could not remove persistence :/")
self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __EventFilter WHERE Name=\"Entypreter\" DELETE")
self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH CommandLineEventConsumer WHERE Name=\"Entypreter\" DELETE")
self.shell.print_command("wmic /NAMESPACE:\"\\\\root\\subscription\" PATH __FilterToConsumerBinding WHERE \"__PATH like '%Entypreter%'\" DELETE")
return
if task == "AddDropper":
handler.reply(200)
if "true" in data.split("~~~")[0]:
self.shell.print_good("HTA file dropped at "+data.split("~~~")[1].split()[0])
self.shell.print_command("del /f "+data.split("~~~")[1].split()[0])
else:
self.shell.print_error("HTA file could not be dropped. Consider cleaning up and choosing a different DROPDIR.")
return
if task == "DeleteDropper":
handler.reply(200)
if "false" in data.split("~~~")[0]:
self.shell.print_good("HTA file deleted from "+data.split("~~~")[1].split()[0])
else:
self.shell.print_error("HTA file could not be deleted.")
self.shell.print_command("del /f "+data.split("~~~")[1].split()[0])
return
if data == "Complete":
super(WMIPersistJob, self).report(handler, data)
handler.reply(200)
def done(self):
self.results = "Completed!"
self.display()
def display(self):
# self.shell.print_plain(self.data)
pass
class WMIPersistImplant(core.implant.Implant):
NAME = "WMI Persistence"
DESCRIPTION = "Creates persistence using a WMI subscription."
AUTHORS = ["Entynetproject"]
STATE = "implant/persist/wmi"
def load(self):
self.options.register("PAYLOAD", "", "Payload to stage.")
self.options.register("CMD", "", "Command.", hidden=True)
self.options.register("CLEANUP", "false", "Will remove the created user.", enum=["true", "false"])
self.options.register("DIRECTORY", "%TEMP%", "Writeable directory for output.", required=False)
self.options.register("LDROPFILE", "data/implant/persist/wmi.dropper", "Local file to drop on the target.", advanced=True)
self.options.register("DROPDIR", "%ALLUSERSPROFILE%", "Directory to place the drop file.", advanced=True)
self.options.register("FDROPDIR", "", "", hidden=True)
self.options.register("RETRYATTEMPTS", "5", "Number of times to retry calling back before self-terminating (-1 == infinite).")
self.options.register("RETRYDELAY", "60", "Seconds between retry attempts.")
self.options.register("DROPFILE", "", "Name to give the drop file (randomly generated if no name).", advanced=True)
self.options.register("FDROPFILE", "", "", hidden=True)
def job(self):
return WMIPersistJob
def run(self):
id = self.options.get("PAYLOAD")
payload = self.load_payload(id)
if payload is None:
self.shell.print_error("Payload %s not found." % id)
return
payloads = {}
payloads["js"] = "data/implant/persist/wmi.js"
self.dispatch(payloads, self.job)
| [
"noreply@github.com"
] | marciopocebon.noreply@github.com |
8368bfbd0e4777b5c99e341ea7c5c3253dd539f6 | 0626949c68622a787fa5d860d654c862676a77e7 | /muxue/user/adminx.py | 7bff5abe2de350bb8bce165760789b265e48d5c5 | [] | no_license | GBXZ/muxueonline | 37f74d51a9d275bde62197addf3f08ac517a52b8 | bf9af33d65a960e6ac9e796587c3ed9cdceba25d | refs/heads/master | 2020-03-26T22:39:15.556180 | 2018-09-28T23:38:55 | 2018-09-28T23:38:55 | 145,477,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # _*_ coding:utf-8 _*_
from django.contrib import admin
from .models import EmailVerifyRecord,Banner
from django.db import models
from datetime import datetime
import xadmin #重要
from xadmin import views #修改主题需要导入views
class BaseSetting(object): #修改xadmin主题
enable_themes = True
use_bootswatch = True
class GlobalSettings(object): #修改xadmin头部和底部字体
site_title = "慕学后台管理系统"
site_footer = "慕学在线网"
menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
list_display = ["code","email","send_type","send_time"] #显示的字段
search_fields = ["code","email","send_type"] #能够搜索的字段
list_filter = ["code","email","send_type","send_time"] #过滤器
class BannerAdmin(object):
    """xadmin configuration for the Banner model."""
    list_display = ["title","image","url","index","add_time"] # columns shown in the change list
    search_fields = ["title","image","url","index"] # fields reachable from the search box
    list_filter = ["title","image","url","index","add_time"] # sidebar filters
# Register the models and admin views with xadmin.
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
xadmin.site.register(views.BaseAdminView,BaseSetting) # apply the theme settings site-wide
xadmin.site.register(views.CommAdminView,GlobalSettings) # apply the header/footer settings
# Register your models here.
| [
"="
] | = |
0e5d6f781d7332442b268007501ec4e99b025beb | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/unsignedInt/Schema+Instance/NISTXML-SV-IV-list-unsignedInt-whiteSpace-1-5.py | 4139e151403a4f62685e832b8f9ab13a90d624e4 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 456 | py | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_white_space_1_xsd.nistschema_sv_iv_list_unsigned_int_white_space_1 import NistschemaSvIvListUnsignedIntWhiteSpace1
# Instance document for the NIST "list of xs:unsignedInt / whiteSpace" test:
# nine values spanning the full unsignedInt range (0 .. 4294967295).
obj = NistschemaSvIvListUnsignedIntWhiteSpace1(
    value=[
        0,
        561145125,
        4003068322,
        3467607955,
        1588590776,
        1695804895,
        736059152,
        4016801897,
        4294967295,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
d8503c891c61ca578f7de87638d0b8b2ee588861 | 4a307849ed4dded5ce84b0ceb6d2cf56c2e64b89 | /fixtures/physical_router_fixture.py | 2735390c29f1263942639fc49bf7dfc581280ae3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | lmadhusudhanan/contrail-test | a6316b41dcb836315d25503f1dee511943d7f976 | bd39ff19da06a20bd79af8c25e3cde07375577cf | refs/heads/master | 2022-05-04T20:01:58.960911 | 2018-06-27T17:56:47 | 2018-06-27T17:56:47 | 138,913,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,453 | py | from netaddr import *
import vnc_api_test
from pif_fixture import PhysicalInterfaceFixture
from physical_device_fixture import PhysicalDeviceFixture
class PhysicalRouterFixture(PhysicalDeviceFixture):
    '''Fixture to manage Physical Router objects

    Creates (or reuses) the BGP-router object associated with a physical
    device and opens a management session to the router.

    Mandatory:
    :param name : name of the device
    :param mgmt_ip : Management IP
    Optional:
    :param vendor : juniper
    :param model : mx
    :param asn : default is 64512
    :param ssh_username : Login username to ssh, default is root
    :param ssh_password : Login password, default is Embe1mpls
    :param tunnel_ip : Tunnel IP (for vtep)
    :param ports : List of Ports which are available to use
    Inherited optional parameters:
    :param domain : default is default-domain
    :param project_name : default is admin
    :param cfgm_ip : default is 127.0.0.1
    :param api_port : default is 8082
    :param connections : ContrailConnections object. default is None
    :param username : default is admin
    :param password : default is contrail123
    :param auth_server_ip : default is 127.0.0.1
    '''
    def __init__(self, *args, **kwargs):
        # NOTE(review): `self` is passed explicitly in addition to the bound
        # super() call, so the parent receives an extra leading positional
        # argument -- confirm PhysicalDeviceFixture expects this.
        super(PhysicalRouterFixture, self).__init__(self, *args, **kwargs)
        self.name = args[0]
        self.mgmt_ip = args[1]
        self.vendor = kwargs.get('vendor', 'juniper')
        self.model = kwargs.get('model','mx')
        self.asn = kwargs.get('asn','64512')
        self.tunnel_ip = kwargs.get('tunnel_ip', self.mgmt_ip)
        self.ports = kwargs.get('ports', [])
        self.bgp_router = None
        # Tracks whether the BGP router pre-existed so cleanUp() knows
        # not to delete objects this fixture did not create.
        self.bgp_router_already_present = False
        try:
            # Optional WebUI-driven configuration path.
            if self.inputs.verify_thru_gui():
                from webui_test import WebuiTest
                self.webui = WebuiTest(self.connections, self.inputs)
                self.kwargs = kwargs
        except Exception as e:
            # NOTE(review): WebUI setup failures are silently ignored
            # (best-effort); confirm this is intended.
            pass
    # end __init__
    def create_bgp_router(self):
        """Create a BgpRouter under the ip-fabric RI and return the read-back object."""
        bgp_router = vnc_api_test.BgpRouter(self.name, parent_obj=self._get_ip_fabric_ri_obj())
        params = vnc_api_test.BgpRouterParams()
        params.address = self.tunnel_ip
        params.address_families = vnc_api_test.AddressFamilies(['route-target',
            'inet-vpn', 'e-vpn', 'inet6-vpn'])
        params.autonomous_system = int(self.asn)
        params.vendor = self.vendor
        params.identifier = self.mgmt_ip
        bgp_router.set_bgp_router_parameters(params)
        bgp_router_id = self.vnc_api_h.bgp_router_create(bgp_router)
        bgp_router_obj = self.vnc_api_h.bgp_router_read(id=bgp_router_id)
        self.logger.info('Created BGP router %s with ID %s' % (
            bgp_router_obj.fq_name, bgp_router_obj.uuid))
        return bgp_router_obj
    # end create_bgp_router
    def delete_bgp_router(self):
        """Delete the BGP router currently attached to this fixture."""
        self.vnc_api_h.bgp_router_delete(id=self.bgp_router.uuid)
        self.logger.info('Deleted BGP router : %s' % (self.bgp_router.uuid))
    def add_bgp_router(self, bgp_router):
        """Associate *bgp_router* with this physical router (re-reads the device first)."""
        self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid)
        self.phy_device.add_bgp_router(bgp_router)
        self.vnc_api_h.physical_router_update(self.phy_device)
    def unbind_bgp_router(self, bgp_router):
        """Remove the association between *bgp_router* and this physical router."""
        self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid)
        self.phy_device.del_bgp_router(bgp_router)
        self.vnc_api_h.physical_router_update(self.phy_device)
    def delete_device(self):
        """Detach the fixture's BGP router, then delete the device itself."""
        self.phy_device = self.vnc_api_h.physical_router_read(id=self.phy_device.uuid)
        self.phy_device.del_bgp_router(self.bgp_router)
        self.vnc_api_h.physical_router_update(self.phy_device)
        super(PhysicalRouterFixture, self).delete_device()
    def setUp(self):
        super(PhysicalRouterFixture, self).setUp()
        bgp_fq_name = ['default-domain', 'default-project',
                       'ip-fabric', '__default__', self.name]
        try:
            # Reuse an existing BGP router for this name if one exists.
            self.bgp_router = self.vnc_api_h.bgp_router_read(
                fq_name=bgp_fq_name)
            self.already_present = True
            self.logger.info('BGP router %s already present' % (
                bgp_fq_name))
            self.bgp_router_already_present = True
        except vnc_api_test.NoIdError:
            # Not found: create it (via WebUI or the API, per config).
            if self.inputs.is_gui_based_config():
                self.bgp_router = self.webui.create_bgp_router(self)
            else:
                self.bgp_router = self.create_bgp_router()
        try:
            if not self.inputs.is_gui_based_config():
                self.add_bgp_router(self.bgp_router)
        except Exception as e:
            # NOTE(review): association failures are silently swallowed --
            # confirm this best-effort behavior is intended.
            pass
        # Management session used by get_irb_mac()/get_virtual_gateway_mac().
        self.router_session = self.get_connection_obj(self.vendor,
            host=self.mgmt_ip,
            username=self.ssh_username,
            password=self.ssh_password,
            logger=self.logger)
    def cleanUp(self):
        super(PhysicalRouterFixture, self).cleanUp()
        # Only delete the BGP router if this fixture created it.
        do_cleanup = True
        if self.bgp_router_already_present:
            do_cleanup = False
        if do_cleanup:
            if self.inputs.is_gui_based_config():
                self.webui.delete_bgp_router(self)
            else:
                self.delete_bgp_router()
    def get_irb_mac(self):
        """Return the MAC address of the router's 'irb' interface."""
        return self.router_session.get_mac_address('irb')
    def get_virtual_gateway_mac(self, ip_address):
        """Return the MAC for *ip_address* from the router's ARP table."""
        return self.router_session.get_mac_in_arp_table(ip_address)
# end PhysicalRouterFixture
if __name__ == "__main__":
    pass
| [
"lmadhusudhan@juniper.net"
] | lmadhusudhan@juniper.net |
e7bc050fb62cf8278450687a7d0018fb13307a67 | 5cff419c080e87ac82e17c9cee8329faa6b66188 | /eventsourcing/tests/test_thespian_runner_with_django.py | d58bb32f104b867ffd389700578e99a714b32868 | [
"BSD-3-Clause"
] | permissive | Shaibujnr/eventsourcing | 56ab35e44a634822a3ce22562c20cfa83b24a73f | a2d8a7ff728f89714f0529791f3bd56498297784 | refs/heads/master | 2022-06-05T04:46:18.159071 | 2020-03-11T18:46:44 | 2020-03-11T18:46:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from eventsourcing.tests.sequenced_item_tests.test_django_record_manager import (
DjangoTestCase,
)
from eventsourcing.application.django import DjangoApplication
from eventsourcing.tests.test_thespian_runner import TestThespianRunner
class TestThespianRunnerWithDjango(DjangoTestCase, TestThespianRunner):
    """Re-run the Thespian runner test suite on Django-backed infrastructure."""
    infrastructure_class = DjangoApplication  # overrides the default application class
| [
"john.bywater@appropriatesoftware.net"
] | john.bywater@appropriatesoftware.net |
106728c42ed86048ef123884e38430115efdd1af | c2d4968c32a4356138a82d9684c86d8759b6f47b | /groupdocs/models/SignatureEnvelopeDocumentsResult.py | 30939f8d6839534c8d55bb88f5e1c622bf254a4a | [] | no_license | liosha2007/groupdocs-heroku-examples-for-python | 77ffe432883f266dc049a8bc4e966fd86a717577 | a96a89c7d9e8798fd5bf769e0c929dfaa1702bf9 | refs/heads/master | 2021-01-01T19:43:27.526546 | 2013-08-05T05:43:34 | 2013-08-05T05:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeDocumentsResult:
    """Result wrapper for the documents contained in a signature envelope.

    NOTE: originally produced by the swagger code generator program;
    edit with care.
    """
    def __init__(self):
        # Attribute-name -> swagger type-name map used by the (de)serializer.
        self.swaggerTypes = {
            'envelopeId': 'str',
            'documents': 'list[SignatureEnvelopeDocumentInfo]',
        }
        # Filled in by the API client after deserialization.
        self.documents = None   # list[SignatureEnvelopeDocumentInfo]
        self.envelopeId = None  # str
| [
"aleksey.permyakov@groupdocs.com"
] | aleksey.permyakov@groupdocs.com |
e6108fb32123d4fbee90708c4cf86bb1e6b75ce0 | ff23e5c890216a1a63278ecb40cd7ac79ab7a4cd | /clients/kratos/python/test/test_message.py | d7bbd553c9e96215a809d9ad5f8b74270dafedb9 | [
"Apache-2.0"
] | permissive | ory/sdk | fcc212166a92de9d27b2dc8ff587dcd6919e53a0 | 7184e13464948d68964f9b605834e56e402ec78a | refs/heads/master | 2023-09-01T10:04:39.547228 | 2023-08-31T08:46:23 | 2023-08-31T08:46:23 | 230,928,630 | 130 | 85 | Apache-2.0 | 2023-08-14T11:09:31 | 2019-12-30T14:21:17 | C# | UTF-8 | Python | false | false | 1,284 | py | """
Ory Identities API
This is the API specification for Ory Identities with features such as registration, login, recovery, account verification, profile settings, password reset, identity management, session management, email and sms delivery, and more. # noqa: E501
The version of the OpenAPI document: v1.0.0
Contact: office@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.courier_message_status import CourierMessageStatus
from ory_kratos_client.model.courier_message_type import CourierMessageType
from ory_kratos_client.model.message_dispatch import MessageDispatch
globals()['CourierMessageStatus'] = CourierMessageStatus
globals()['CourierMessageType'] = CourierMessageType
globals()['MessageDispatch'] = MessageDispatch
from ory_kratos_client.model.message import Message
class TestMessage(unittest.TestCase):
    """Unit test stubs for the generated Message model."""
    def setUp(self):
        # No shared fixtures needed for these stubs.
        pass
    def tearDown(self):
        pass
    def testMessage(self):
        """Test Message construction (stub -- not yet implemented)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = Message() # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
a87b819f18508bfd72a1ccd2fc1fce02dfdf811c | 4133fce90189868a246e916a5851dc05bf5e2172 | /eOwner/bookbus/forms.py | 6c08628a00148c0cc38fe490fd975d8d288ad4a9 | [] | no_license | achanyasuresh12/eowner | 47ea8dfeb7dc63c1ea934d1d05f0714f14a3fde8 | bcfd6d3069a34e3bde7c760a60d43a7553078b23 | refs/heads/master | 2020-04-07T19:10:59.995107 | 2018-11-22T04:24:15 | 2018-11-22T04:24:15 | 158,639,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from django import forms
from . import models
class bookbus_forms(forms.ModelForm):
    """ModelForm for creating a Bookbus booking (source, destination, date)."""
    class Meta:
        model = models.Bookbus
        fields = ["source", "destination", "date"]
| [
"45094866+achanyasuresh12@users.noreply.github.com"
] | 45094866+achanyasuresh12@users.noreply.github.com |
a161fd71a289da90daa7f083d6e9669340fa178b | 5c7da7dabdc076ad7113ccd20561a8bbf5f9a70e | /documents/migrations/0039_auto_20200921_1652.py | 14c75f0cf092c56569d4c8734f95d131ab823e9a | [] | no_license | aqcloudacio/cloudaciofeez | 2499fb5fc5334fa871daab2abea6c34bfa8c7667 | 8399560ece9aa10a6d6801f42c027dca26a65936 | refs/heads/master | 2023-02-27T22:36:20.501159 | 2021-02-11T00:03:46 | 2021-02-11T00:03:46 | 337,887,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # Generated by Django 2.2.7 on 2020-09-21 06:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Structure.theme a required FK and enforce one Structure per Theme."""
    dependencies = [
        ('documents', '0038_theme_default'),
    ]
    operations = [
        # Theme becomes non-nullable; default=1 back-fills existing rows
        # during this migration only (preserve_default=False drops it after).
        migrations.AlterField(
            model_name='structure',
            name='theme',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='structures', to='documents.Theme'),
            preserve_default=False,
        ),
        # Database-level guarantee: each theme has at most one structure.
        migrations.AddConstraint(
            model_name='structure',
            constraint=models.UniqueConstraint(fields=('theme',), name='unique_theme'),
        ),
    ]
| [
"alejandro.quintero@clouxter.com"
] | alejandro.quintero@clouxter.com |
fcf1b34b972a4a2c7edd899130321198c2ddb57c | ff62ac78e34cdaf6d5d3bc7230a4f4aee740b142 | /HigherLowerGameProject/main.py | 6276b3debd940e6df69de059668e95a99a27fdd8 | [] | no_license | suriyaganesh97/pythonbasicprogs | 7ff67ca6193150d9c61e1eb10e2727694d9b7c6e | ffb599f1804654785757fea4b0f36b11094a4fae | refs/heads/master | 2022-01-03T22:07:27.625938 | 2022-01-02T11:35:28 | 2022-01-02T11:35:28 | 253,271,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | import random
from gameData import data

# Higher-Lower game: two accounts are shown each round and the player guesses
# which has more followers.  The account with more followers carries over to
# the next round; the game ends on a wrong guess or invalid input.
dict_1 = random.choice(data)
dict_1_follower_count = dict_1["follower_count"]
is_game_over = False
current_score = 0
while not is_game_over:
    dict_2 = random.choice(data)
    dict_2_follower_count = dict_2["follower_count"]
    print(f'A is {dict_1["name"]}, description is {dict_1["description"]}, country is {dict_1["country"]}')
    print(f'B is {dict_2["name"]}, description is {dict_2["description"]}, country is {dict_2["country"]}')
    # (Removed the debug prints of the follower counts -- they revealed
    # the answer to the player before each guess.)
    user_guess = input("who do you think has high no of followers A or B: ")
    if user_guess == "A" or user_guess == "a":
        if dict_1_follower_count > dict_2_follower_count:
            print(f'yes you are right.{dict_1["name"]} has {dict_1_follower_count} followers and {dict_2["name"]} has only {dict_2_follower_count} followers')
            current_score += 1
        elif dict_2_follower_count > dict_1_follower_count:
            print(f'you are wrong.{dict_2["name"]} has {dict_2_follower_count} followers while {dict_1["name"]} has only {dict_1_follower_count} followers')
            print(f"your total score is {current_score}")
            is_game_over = True
        else:
            print(f'both have same no of {dict_1_follower_count} followers')
    elif user_guess == "B" or user_guess == "b":
        if dict_2_follower_count > dict_1_follower_count:
            print(f'yes you are right.{dict_2["name"]} has {dict_2_follower_count} followers and {dict_1["name"]} has only {dict_1_follower_count} followers')
            current_score += 1
            # B wins the round, so B becomes the reference account.
            dict_1 = dict_2
            # BUG FIX: keep the cached follower count in sync with dict_1 --
            # the original left it stale, corrupting every later comparison.
            dict_1_follower_count = dict_2_follower_count
        elif dict_1_follower_count > dict_2_follower_count:
            print(f'you are wrong.{dict_1["name"]} has {dict_1_follower_count} followers while {dict_2["name"]} has only {dict_2_follower_count} followers')
            print(f"your total score is {current_score}")
            is_game_over = True
        else:
            print(f'both have same no of {dict_1_follower_count} followers')
    else:
        print("invalid input")
        is_game_over = True
| [
"63084594+suriyaganesh97@users.noreply.github.com"
] | 63084594+suriyaganesh97@users.noreply.github.com |
def detect_anagrams(word, list):
    """Return the members of *list* that are anagrams of *word*.

    Comparison is case-insensitive, and a word is never an anagram of
    itself (also case-insensitively -- the original compared the lowered
    candidate against the un-lowered target, so "Go" matched "go").

    Rewritten with sorted-letter comparison: works on both Python 2 and 3
    (the original used dict.has_key, which no longer exists on Python 3).
    """
    lowered = word.lower()
    target = sorted(lowered)
    return [w for w in list
            if w.lower() != lowered and sorted(w.lower()) == target]
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
897f1e3a9e728d1fa9d621e94dc4fd26b2179861 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_203512.89+152029.3/sdB_sdssj_203512.89+152029.3_lc.py | ed34883c535f2590c57d4ff4b91b6ce2db5f9d5f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from gPhoton.gAperture import gAperture
def main():
    """Extract a GALEX NUV light curve for sdB SDSSJ203512.89+152029.3.

    30 s time bins, 0.00555556 deg (~20 arcsec) aperture with a
    0.00597-0.01039 deg background annulus; results are written to the
    CSV path below (overwriting any existing file).
    """
    gAperture(band="NUV", skypos=[308.803708,15.341472], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_203512.89+152029.3/sdB_sdssj_203512.89+152029.3_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
b28f4481f84cc96b3adba71e3715fbe9ff783705 | f2cece9e5f2af8482c12fc7ad8b3a7e63e6de052 | /tbot/handlers/user/start.py | 4b2ae37538b4ce57deaca2c8b96805159c068fa2 | [] | no_license | nikifkon-old/questionnaire_bot | beadc716ca0a7cbfa6a4c47039c00123e8892eb4 | 3cbf889c7edf4ba438ce7e46c5f9b67efe5d7e72 | refs/heads/master | 2023-04-24T07:12:28.227259 | 2020-08-03T09:14:35 | 2020-08-03T09:14:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from aiogram import types
from tbot import messages, schemas
from tbot.bot import bot
from tbot.handlers.utils import process_if_user_exit, send_welcome_message
from tbot.utils import save_user
from .register import bot_register
async def bot_start(message: types.Message):
    """
    /start command handler.

    If the /start deep-link payload encodes a house, register the user
    immediately; otherwise greet the user and begin the generic
    registration flow.
    """
    chat_id = message.chat.id
    # Flag telling whether to keep handling this user (presumably False
    # when the user already exists or opted out -- TODO confirm against
    # Handlers.utils.process_if_user_exit).
    continue_ = await process_if_user_exit(user_id=chat_id)
    if continue_:
        payload = message.get_args()  # deep-link payload after "/start "
        if payload:
            # Parse the payload into a House; `created` is False for a
            # malformed payload, in which case `data` carries "error_msg".
            data, created = schemas.House.from_string(payload)
            if created:
                user_lang = message.from_user.language_code
                user = schemas.User(id=chat_id, house=data, lang=user_lang)
                save_user(user)
                await send_welcome_message(user)
            else:
                await bot.send_message(
                    chat_id=chat_id,
                    text=messages.INVALID_START_PAYLOAD_ERROR.format(error_message=data["error_msg"])
                )
        else:
            # No payload: plain /start -- greet, then start registration.
            await bot.send_message(
                chat_id=chat_id,
                text=messages.START_MESSAGE
            )
            await bot_register(message)
| [
"kostya.nik.3854@gmail.com"
] | kostya.nik.3854@gmail.com |
f811d2d33cb606d533dd48f19f66579b24eae8f0 | 81d2e3b6fe042e70cc2abb7f549f60ba44928fdf | /binarysearch/167.two-sum-ii-input-array-is-sorted.py | 5263d1575f7673caccb884b02909c3aa8860a62f | [] | no_license | weizhixiaoyi/leetcode | a506faed3904342ed65234864df52071977d544d | 6114ebacc939f48a39a56d366646b0f28b4f6c1a | refs/heads/master | 2022-12-22T03:52:07.936800 | 2020-09-29T07:49:52 | 2020-09-29T07:49:52 | 202,662,720 | 5 | 2 | null | 2019-08-17T09:24:49 | 2019-08-16T05:16:08 | C++ | UTF-8 | Python | false | false | 1,303 | py | # -*- coding:utf-8 -*-
from typing import List
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Two Sum II: *numbers* is sorted ascending; return the 1-based
        indices of the two entries that add up to *target*.

        Two-pointer scan from both ends: O(n) time, O(1) space.
        (Removed the dead `cur_sum = 0` initializer, the redundant third
        comparison, and the large commented-out binary-search variant.)
        """
        left, right = 0, len(numbers) - 1
        while left < right:
            cur_sum = numbers[left] + numbers[right]
            if cur_sum == target:
                return [left + 1, right + 1]
            if cur_sum < target:
                # Sum too small: only moving the left pointer up can help.
                left += 1
            else:
                # Sum too large: move the right pointer down.
                right -= 1


if __name__ == '__main__':
    values = [1, 2, 3, 4, 4, 5]
    target = 8
    ans = Solution().twoSum(values, target)
    print('ans: ', ans)
| [
"zhenhai.gl@gmail.com"
] | zhenhai.gl@gmail.com |
79f82030a48735a86adfccce5e9f80853fc062fe | 81a62053841c03d9621fd31f8e7984c712c7aed2 | /zoo/BEVDepth/exps/mv/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da.py | 5ed3cd169fb3435f26326fe5c7432fe9302e61be | [
"MIT"
] | permissive | Daniel-xsy/BEV-Attack | d0eb3a476875f9578c53df9bcb21564dea18ce0c | 7970b27396c1af450c80b12eb312e76a8ab52a0a | refs/heads/master | 2023-05-23T01:13:44.121533 | 2023-02-22T05:48:14 | 2023-02-22T05:48:14 | 540,328,937 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,038 | py | # Copyright (c) Megvii Inc. All rights reserved.
"""
mAP: 0.3484
mATE: 0.6159
mASE: 0.2716
mAOE: 0.4144
mAVE: 0.4402
mAAE: 0.1954
NDS: 0.4805
Eval time: 110.7s
Per-class results:
Object Class AP ATE ASE AOE AVE AAE
car 0.553 0.480 0.157 0.117 0.386 0.205
truck 0.252 0.645 0.202 0.097 0.381 0.185
bus 0.378 0.674 0.197 0.090 0.871 0.298
trailer 0.163 0.932 0.230 0.409 0.543 0.098
construction_vehicle 0.076 0.878 0.495 1.015 0.103 0.344
pedestrian 0.361 0.694 0.300 0.816 0.491 0.247
motorcycle 0.319 0.569 0.252 0.431 0.552 0.181
bicycle 0.286 0.457 0.255 0.630 0.194 0.006
traffic_cone 0.536 0.438 0.339 nan nan nan
barrier 0.559 0.392 0.289 0.124 nan nan
"""
import torch
import torch.nn as nn
from torch.cuda.amp.autocast_mode import autocast
from torch.optim.lr_scheduler import MultiStepLR
from exps.base_cli import run_cli
from exps.mv.bev_depth_lss_r50_256x704_128x128_24e_2key import \
BEVDepthLightningModel as BaseBEVDepthLightningModel
from layers.backbones.base_lss_fpn import BaseLSSFPN as BaseLSSFPN
from layers.heads.bev_depth_head import BEVDepthHead
from models.base_bev_depth import BaseBEVDepth as BaseBEVDepth
class DepthAggregation(nn.Module):
    """
    pixel cloud feature extraction

    Small residual conv block that refines depth-weighted image features:
    reduce (conv-BN-ReLU) -> residual double conv -> output conv.
    """
    def __init__(self, in_channels, mid_channels, out_channels):
        super(DepthAggregation, self).__init__()
        # 3x3 conv + BN + ReLU mapping the input to mid_channels.
        self.reduce_conv = nn.Sequential(
            nn.Conv2d(in_channels,
                      mid_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
        )
        # Two 3x3 conv-BN-ReLU layers; used with a residual skip in forward().
        self.conv = nn.Sequential(
            nn.Conv2d(mid_channels,
                      mid_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels,
                      mid_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
        )
        # Final 3x3 projection; note bias=True and no BN/activation here.
        self.out_conv = nn.Sequential(
            nn.Conv2d(mid_channels,
                      out_channels,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=True),
            # nn.BatchNorm3d(out_channels),
            # nn.ReLU(inplace=True),
        )
    @autocast(False)
    def forward(self, x):
        # autocast is disabled so this block always runs in full fp32
        # (presumably for numerical stability under AMP -- TODO confirm).
        x = self.reduce_conv(x)
        x = self.conv(x) + x  # residual connection
        x = self.out_conv(x)
        return x
class LSSFPN(BaseLSSFPN):
    """BaseLSSFPN variant that refines voxel features with DepthAggregation."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.depth_aggregation_net = self._configure_depth_aggregation_net()
    def _configure_depth_aggregation_net(self):
        """build pixel cloud feature extractor"""
        return DepthAggregation(self.output_channels, self.output_channels,
                                self.output_channels)
    def _forward_voxel_net(self, img_feat_with_depth):
        # BEVConv2D [n, c, d, h, w] -> [n, h, c, w, d]
        img_feat_with_depth = img_feat_with_depth.permute(
            0, 3, 1, 4, 2).contiguous()  # [n, c, d, h, w] -> [n, h, c, w, d]
        n, h, c, w, d = img_feat_with_depth.shape
        # Fold (n, h) into the batch so the 2D aggregation net processes
        # (n*h, c, w, d) "images" in one pass.
        img_feat_with_depth = img_feat_with_depth.view(-1, c, w, d)
        # Restore the 5-D layout, undo the permutation, and force fp32.
        img_feat_with_depth = (
            self.depth_aggregation_net(img_feat_with_depth).view(
                n, h, c, w, d).permute(0, 2, 4, 1, 3).contiguous().float())
        return img_feat_with_depth
class BEVDepth(BaseBEVDepth):
    """BaseBEVDepth wired to the depth-aggregating LSSFPN backbone."""
    def __init__(self, backbone_conf, head_conf, is_train_depth=True):
        # NOTE(review): super(BaseBEVDepth, self) deliberately skips
        # BaseBEVDepth.__init__ (which would build the plain backbone) and
        # initializes the grandparent instead -- confirm this is intended.
        super(BaseBEVDepth, self).__init__()
        self.backbone = LSSFPN(**backbone_conf)
        self.head = BEVDepthHead(**head_conf)
        self.is_train_depth = is_train_depth
class BEVDepthLightningModel(BaseBEVDepthLightningModel):
    """Experiment config: 20 epochs, CBGS resampling, 2 key frames, depth agg."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model = BEVDepth(self.backbone_conf,
                              self.head_conf,
                              is_train_depth=True)
        self.data_use_cbgs = True  # class-balanced grouping and sampling
    def configure_optimizers(self):
        # Linear LR scaling with the total batch size across devices.
        lr = self.basic_lr_per_img * \
            self.batch_size_per_device * self.gpus
        optimizer = torch.optim.AdamW(self.model.parameters(),
                                      lr=lr,
                                      weight_decay=1e-7)
        # Decay the LR at epochs 16 and 19 of the 20-epoch schedule.
        scheduler = MultiStepLR(optimizer, [16, 19])
        return [[optimizer], [scheduler]]
if __name__ == '__main__':
    # Launch via the shared experiment CLI; the string names the experiment.
    run_cli(BEVDepthLightningModel,
            'bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da')
| [
"1491387884@qq.com"
] | 1491387884@qq.com |
54b776d05894dbd7304491d291348150d9dee7f7 | 251af797da940483d843077cfe1912acd019e73e | /sis/schedule/migrations/0001_initial.py | bddf4e281d27779ecb610321405d32bca51de9cf | [] | no_license | mitctc/sis | 951d57fce1376947cbc6a00594d17c1cfb5f78d6 | 9a955b6c12cae977bd45ff6025a87b492fa0f6e1 | refs/heads/master | 2021-05-30T13:12:10.705276 | 2016-02-14T02:23:01 | 2016-02-14T02:23:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-09 14:09
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schedule schema: Lab/Practical/Theory session models.

    All three share the same core fields (name, start/end time,
    attendance flag, FK to a course Activity); PracticalSession also
    records the attending user.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course', '0010_auto_20160209_0828'),
    ]
    operations = [
        migrations.CreateModel(
            name='LabSession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                # Session window defaults to "now" until edited.
                ('start_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('end_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('is_present', models.BooleanField()),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Activity')),
            ],
        ),
        migrations.CreateModel(
            name='PracticalSession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('start_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('end_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('is_present', models.BooleanField()),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Activity')),
                # Practical sessions additionally track the attending user.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='TheorySession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
                ('start_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('end_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('is_present', models.BooleanField()),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Activity')),
            ],
        ),
    ]
| [
"sumudu.susahe@gmail.com"
] | sumudu.susahe@gmail.com |
27e371636f02b8abf128178364910ed7e959ff49 | 891902687207fb335b65dbb8d31d6e20301764f9 | /pe103.py | 87cd77af804629f8d4877da843a2d8a0c204e281 | [] | no_license | maecchi/PE | 93bd050eaca2733aa37db6ca493b820fe3d7a351 | 3d9092635807f0036719b65adb16f1c0926c2321 | refs/heads/master | 2020-05-04T16:38:36.476355 | 2012-06-10T05:26:10 | 2012-06-10T05:26:10 | 1,746,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe103.py - Project Euler
#
# 題意の規則に乗っ取って条件を作成する
from itertools import *
N = 7
#
def findsSum(a, n):
    """Return True if some subset of list *a* sums exactly to *n*.

    Classic subset-sum recursion: the first element is either used
    (reducing n) or skipped.  Exponential in len(a), which is fine for
    the small sets handled by this problem.
    """
    if n == 0:
        return True
    if n < 0 or len(a) == 0:
        return False
    a2 = a[1:]
    # Short-circuit with `or`: the original wrapped both recursive calls
    # in any((..., ...)), which always evaluated BOTH branches eagerly.
    return findsSum(a2, n - a[0]) or findsSum(a2, n)
# Rule (i): no two disjoint subsets may have the same sum.
def condition1(a):
    def c2_core(b):
        # b can only split into two equal-sum halves if sum(b) is even and
        # some sub-subset hits sum(b)/2; WLOG fix b's largest element on
        # one side and search the remainder among the smaller elements.
        s = sum(b)
        if s % 2 == 1:
            return True
        half = s / 2  # Python 2 integer division (s is even, so exact)
        back = b[-1]
        n = half - back
        return not findsSum(b[:-1], n)
    n = len(a)
    # Check every subset of size 3..n (size-2 clashes cannot occur in a
    # strictly increasing candidate).
    return all(c2_core(b) for m in range(3, n+1) for b in combinations(a, m))
# Rule (ii): a subset with more elements must always have a larger sum.
def condition2(a):
    """Check the "more elements => larger sum" rule on the sorted list *a*.

    For a sorted set it suffices that the smallest ceil(n/2) elements
    outweigh the largest floor(n/2) - 1 + 1 trailing elements compared
    here.  Uses floor division (//), which is identical to Python 2's
    `/` on ints but keeps the slice indices integral under Python 3.
    """
    n = len(a)
    return sum(a[:(n + 1) // 2]) > sum(a[n // 2 + 1:])
# Check that both special-sum-set conditions hold.
def isValid(a):
    """Return True if *a* satisfies both special sum set rules."""
    return condition2(a) and condition1(a)
# Generate candidate sets near the seed set *a*.
#   a:    seed set; each element may vary in the range a[k]-2 .. a[k]+1
#   prev: value chosen at the previous position (keeps output ascending)
#   k:    index of the element currently being chosen
def genSets(a, prev = 0 , k = 0):
    if k == len(a):
        yield []
    else:
        begin = max(a[k] - 2, prev + 1) # candidate range is a[k]-2 .. a[k]+1, strictly above prev
        if k == len(a) - 1:
            end = a[k] + 1
        else :
            end = min(a[k] + 1, a[k+1] - 1)  # also stay strictly below the next seed value
        for p in range(begin, end+1):
            # Recurse for the remaining positions with p as the new floor.
            for b in genSets(a, p, k+1):
                yield [p] + b
# Pick the valid candidate set with the smallest total sum.
def minimize(a):
    solutions = [b for b in genSets(a) if isValid(b)]
    # note: the lambda's `a` shadows the parameter, but only sums `b` values
    return min(solutions, key = lambda a: sum(a))
# Build the next optimum candidate from the current one: take the middle
# element b, prepend it, and add b to every element (the standard size
# n -> n+1 heuristic for special sum sets), then search around that seed.
def nextSet(a):
    """Return the near-optimal special sum set one element larger than *a*."""
    n = len(a)
    # Floor division: identical to Python 2's `/` on ints, and keeps the
    # index integral under Python 3 (a[n/2] would raise TypeError there).
    b = a[n // 2]
    return minimize([b] + [e + b for e in a])
# Iterate the heuristic from {1} up to sets of size N and print the result's
# digits concatenated (Python 2: `reduce` is a builtin, print is a statement).
a = reduce(lambda x, y : nextSet(x), range(2, N+1), [1])
print "".join(map(str, a))
| [
"aos81922710@yahoo.co.jp"
] | aos81922710@yahoo.co.jp |
c174095d9ba03d8fb0442391289c0b3348b8c63e | ee052fca7b7cdf875b3e18f28f6102395407d584 | /python/example/objects.py | 2218d352afd3c9582e59cca26ab888f4c26106a8 | [] | no_license | vimm0/algorithm-challenges | 8313813dbbba0dff4c0d872d68c74f9b508d956f | f4ae12b7a33340b8f3c4ce105b7ef3fb9f24e613 | refs/heads/master | 2020-03-26T23:51:52.913657 | 2018-10-30T16:16:52 | 2018-10-30T16:16:52 | 145,573,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | #!/usr/bin/python
from builtins import staticmethod, str
class InvoiceItem:
    """A single line item on an invoice.

    Keyword args:
        text: description of the item (default '').
        amt:  amount charged (default 0).
    """
    def __str__(self):
        return self.text + ' ' + str(self.amt)
    def __init__(self, **kwargs):
        # Use .get() so a missing keyword falls back to the default instead
        # of raising KeyError (the original required both keys to be passed).
        text = kwargs.get('text')
        amt = kwargs.get('amt')
        self.text = text if text is not None else ''
        self.amt = amt if amt is not None else 0
class Invoice:
    """An invoice: a numbered collection of line items with amounts.

    (The original placed '''An invoice.''' as a stray string expression in
    the middle of the class body; it is now the real class docstring.)
    """
    # This is a static (class-level) variable shared by all invoices.
    my_company_name = "DIYComputerScience"
    # This is a static method.
    @staticmethod
    def get_service_tax_per():
        """Return the service-tax percentage applied to invoices."""
        return 12.5
    def __str__(self):
        return self.number + ' ' + str(self.amt())
    def __init__(self, **kwargs):
        self.number = ''
        self.client = ''
        self.date = ''
        self.invoice_items = []
    def add_invoice_item(self, invoice_entry):
        """Append an entry; it only needs an ``amt`` attribute."""
        self.invoice_items.append(invoice_entry)
    def amt(self):
        """Return the total of all item amounts (idiomatic sum())."""
        return sum(item.amt for item in self.invoice_items)
# Demo: build an invoice with one line item and print "number amount".
invoice = Invoice()
invoice.number = '20080422_01'
invoice.client = 'Sun Microsystems'
invoice.date = '22/04/2008'
invoice_item = InvoiceItem(text='consulting April', amt=2000)
invoice.add_invoice_item(invoice_item)
print(invoice)  # -> 20080422_01 2000
| [
"vimmrana0@gmail.com"
] | vimmrana0@gmail.com |
b1aa1154a111a4802c9c3e765a47373412a8820d | e52afdf311d9b682fd2edfa2ac131bd83bbe63eb | /Week 2/1-2/knn.py | 54420f3213ba04fc5bc1f0cd9f67980cd384cb91 | [] | no_license | YashwanthMN1/MLEdyoda | cc1185e4618e896764a0b01773a886e49ba9b8e7 | 36a9470729c57c7b6b742bac239e9352f8b2a133 | refs/heads/main | 2023-05-02T11:51:11.801693 | 2021-05-25T13:52:24 | 2021-05-25T13:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 9 13:11:46 2021
@author: RISHBANS
"""
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2,3]].values  # feature columns 2-3 (presumably Age, EstimatedSalary -- confirm against the CSV)
y = dataset.iloc[:, 4].values  # target column 4 (presumably Purchased)
X = X.astype(float)
y = y.astype(float)
# Splitting the dataset into the Training set and Test set (75/25, fixed seed)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
#https://machine-arena.blogspot.com/2020/04/standardscaler-why-fittransform-for.html
# Fit the scaler on the training split only, then apply the same transform
# to the test split (avoids test-set leakage).
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# k-nearest neighbours classifier with k = 11
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 11)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
cm = confusion_matrix(y_test, y_pred)
# NOTE(review): target_names maps sorted class labels to ["yes", "no"],
# i.e. assumes 0.0 -> "yes" and 1.0 -> "no" -- confirm the intended mapping.
print(classification_report(y_test, y_pred, target_names=["yes", "no"]))
print(cm)
"rishibansal02@gmail.com"
] | rishibansal02@gmail.com |
2cd3388cd66000a0c14eee28eb57bb0be2033b95 | 6ccd833a6bc8eb2d7cadbaf64ba9351d87c8d1bd | /Handlers/User/Driver.py | db2b2045ecd5c5a92ae606173af52da3ffa7ed4a | [] | no_license | elaineo/barnacle-gae | e00691235160d140fb5004b34988d30811ef4102 | c706683cf448dc5763bb2ce8ea2f5968fcefb375 | refs/heads/master | 2021-01-01T03:47:28.083451 | 2014-11-18T16:47:42 | 2014-11-18T16:47:42 | 59,609,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,078 | py | from Handlers.BaseHandler import *
from Models.ImageModel import *
from Models.User.Account import *
from Models.User.Driver import *
from Handlers.ImageHandler import *
from google.appengine.api import images
from google.appengine.api import users
import logging
import json
class DriverHandler(BaseHandler):
    def get(self, key=None):
        """Route GET requests: no key -> own profile (or sign-in), key -> public view."""
        if not key:
            if not self.user_prefs:
                # Not signed in: bounce to the sign-in box on the landing page.
                self.redirect('/#signin-box')
            else:
                self.__index()
        else:
            # NOTE(review): no __public method is defined anywhere in this
            # class as shown — this branch would raise AttributeError; confirm.
            self.__public(key)
    def __index(self):
        """ User profile information page """
        self.params['createorupdate'] = 'Ready to Drive'
        d = Driver.by_userkey(self.user_prefs.key)
        self.params['driver'] = True
        if d: #existing driver
            # Existing drivers go straight to their route page.
            self.redirect('/route')
            return
        # else first time being driver
        # Pre-fill the driver form from the user's account data.
        self.params.update(self.user_prefs.params_fill())
        self.render('user/forms/filldriver.html', **self.params)
def post(self,action=None):
if not self.user_prefs: # if user is not logged in, redirect to login page, which will redirect back
self.redirect('/#signin-box')
if not action:
self.__profile()
elif action=='photo':
self.__updateimg()
    def __updateimg(self):
        """Store/replace the driver's vehicle image and reply with its URL as JSON.

        NOTE(review): when no "file" field is posted, the handler writes no
        response at all — confirm that an empty 200 is the intended outcome.
        """
        img = self.request.get("file")
        if img:
            d = Driver.by_userkey(self.user_prefs.key)
            if d.img_id: # existing image
                imgstore = ImageStore.get_by_id(d.img_id)
                imgstore.update(img)
            else: # new image
                imgstore = ImageStore.new(img)
                imgstore.put()
                # Remember the new blob's id on the driver record.
                d.img_id = imgstore.key.id()
                d.put()
            self.response.headers['Content-Type'] = "application/json"
            response = {'status':'ok', 'img_url': d.vehicle_image_url()}
            self.write(json.dumps(response))
    def __profile(self):
        """Update the user's account fields (name, about, location, photo) from the POST body."""
        user_prefs = self.user_prefs
        user_prefs.first_name = self.request.get("first_name").capitalize()
        user_prefs.last_name = self.request.get("last_name")
        # Flatten multi-line "about" text into one line.
        user_prefs.about = self.request.get("about").replace('\n','')
        # validate
        location = self.request.get("loc")
        if location:
            user_prefs.location = location
        lat = self.request.get('startlat')
        lon = self.request.get('startlon')
        if lat and lon:
            # Store the coordinate pair as a GeoPt for geo queries.
            user_prefs.locpt = ndb.GeoPt(lat,lon)
        # image upload
        img = self.request.get("file")
        if img:
            # NOTE(review): img_id == 0 is falsy, so an id of 0 would take the
            # "new image" branch despite the >= 0 check — confirm ids start at 1.
            if user_prefs.img_id and user_prefs.img_id>=0: # existing image
                imgstore = ImageStore.get_by_id(user_prefs.img_id)
                imgstore.update(img)
            else: # new image
                imgstore = ImageStore.new(img)
                imgstore.put()
                user_prefs.img_id = imgstore.key.id()
        user_prefs.put()
        logging.info('User turns into a driver')
self.redirect(user_prefs.profile_url()) | [
"elaine.ou@gmail.com"
] | elaine.ou@gmail.com |
33a624738cbd967c7805dc0b4eae16b9d1fecd8b | ba2dbc19e899faaa17b994a1224e455a3de5b9ad | /01_jump to python/CHAP07/2_Regular_Expression_Practice/q8.py | b16778f7c4b0ffa22de4f29cb9db0a0d2a3226d3 | [] | no_license | xsky21/bigdata2019 | 52d3dc9379a05ba794c53a28284de2168d0fc366 | 19464a6f8862b6e6e3d4e452e0dab85bdd954e40 | refs/heads/master | 2020-04-21T10:56:34.637812 | 2019-04-16T04:16:27 | 2019-04-16T04:16:27 | 169,503,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # 숫자 0 혹은 알파벳 b 여러 개가 알파벳 a 뒤에오는 문자열을 찾는 파이썬 프로그램을 만들어라
import re
def matching(answer):
    """Search *answer* for an uppercase letter followed by one or more
    lowercase letters, and print the resulting match object (or ``None``)."""
    match = re.search("[A-Z][a-z]+", answer)
    print(match)
matching("Azzzzz")
matching("AAaaaa")
matching("AA")
matching("abbb")
#여기선 b가 세 개일 때, b 세개를 출력해주지만. b2개를 출력시키고 싶다면 |를 써야된 | [
"studerande5@gmail.com"
] | studerande5@gmail.com |
445d03f9d2bd2d6754c4785b3446d88b3eddc2f4 | 699a43917ce75b2026a450f67d85731a0f719e01 | /using_python/148_sort_for_listNode.py | 8638dd3b7dfca07b44ec645edaf51ef5fb6dd872 | [] | no_license | wusanshou2017/Leetcode | 96ab81ae38d6e04739c071acfc0a5f46a1c9620b | c4b85ca0e23700b84e4a8a3a426ab634dba0fa88 | refs/heads/master | 2021-11-16T01:18:27.886085 | 2021-10-14T09:54:47 | 2021-10-14T09:54:47 | 107,402,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # Definition for singly-linked list.
class ListNode:
    """Node of a singly linked list: a value plus a reference to the next node."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def sortList(self, head: ListNode) -> ListNode:
if not head.next:
return []
pre = head.val
dummy = head
while dummy.next:
pre = dummy.val
cur = dummy.next.val
if cur < pre:
dummy.val, dummy.next.val = cur, pre
def cut(self, head: ListNode) -> ListNode:
P = head
while (n - 1) and p:
p = p.next
n -= 1
if not p:
return None
ce = p.next
ce.next = None
return ce
    def merge(seq1, seq2):
        # NOTE(review): unfinished stub — the source file is truncated here;
        # only the dummy head is created and the merge logic is missing.
        # Also note it lacks ``self`` while its sibling methods take it.
        dummy = ListNode()
| [
"252652905@qq.com"
] | 252652905@qq.com |
c2cfa7dbfec1bae1baa2995125e12cacf16d1d4b | fab215713c1b72974a0dc7db73a20e4b5abefe4a | /简明python教程/data-structure/tuple/using_tuple.py | d8e928575aac20cd564ebf278957b92d0c8a5898 | [] | no_license | cwdgit/learn-python | e6774dcea506cfa461bfccc001205bf75a1d126b | c5ba97a917bd2b8d7b767ce704ca5ff441b9dfee | refs/heads/master | 2020-03-24T16:41:47.837953 | 2018-08-23T03:13:04 | 2018-08-23T03:13:04 | 142,832,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #!/usr/bin/python
#filename: using_tuple.py
# NOTE(review): Python 2 source (print statements) — this will not run on Python 3.
# A tuple of three strings; tuples are fixed-size, immutable sequences.
zoo=('wolf','elephant','penguin')
print zoo
print 'number of animals in the zoo is', len(zoo)
# Nesting: the whole old tuple becomes the third element of the new one.
new_zoo=('monkey','dolphin',zoo)
# NOTE(review): len(new_zoo) is 3 — the nested tuple counts as one element,
# so the "number of animals" message under-counts the animals.
print 'number of animals in the zoo is ', len(new_zoo)
print 'all animals in new zoo are',new_zoo
# new_zoo[2] is the nested old zoo; new_zoo[2][2] indexes into it.
print 'animals brought from old zoo are ' ,new_zoo[2]
print 'last animal brought from old zoo is ', new_zoo[2][2]
| [
"you@example.com"
] | you@example.com |
b0b6eab9b83d55d521265d739f7a459d8fef349c | 860c31e414c4c280b70ec0872042d715a2d56978 | /torch_ecg/models/cnn/efficientnet.py | ee0521f6eb097623f4385fdca0f8d28cfb057f82 | [
"MIT"
] | permissive | DeepPSP/torch_ecg | 255e49ff436e13044a1f049141f982680e56970e | a40c65f4fefa83ba7d3d184072a4c05627b7e226 | refs/heads/master | 2023-09-01T06:47:17.153216 | 2023-08-31T18:00:47 | 2023-08-31T18:00:47 | 298,482,237 | 111 | 16 | MIT | 2023-08-21T11:25:07 | 2020-09-25T06:03:17 | Python | UTF-8 | Python | false | false | 2,317 | py | """
EfficientNet.
References
----------
1. Tan, M., & Le, Q. V. (2019). Efficientnet: Rethinking model scaling for convolutional neural networks. arXiv preprint arXiv:1905.11946.
2. Tan, M., & Le, Q. V. (2021). Efficientnetv2: Smaller models and faster training. arXiv preprint arXiv:2104.00298.
3. https://github.com/google/automl
"""
from typing import List
from torch import nn
from ...models._nets import ( # noqa: F401
Conv_Bn_Activation,
DownSample,
GlobalContextBlock,
NonLocalBlock,
SEBlock,
)
from ...utils import SizeMixin, CitationMixin
__all__ = [
"EfficientNet",
]
class EfficientNet(nn.Module, SizeMixin, CitationMixin):
    """
    Reference
    ---------
    1. Tan, M., & Le, Q. V. (2019). Efficientnet: Rethinking model scaling for convolutional neural networks. arXiv preprint arXiv:1905.11946.
    2. https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py
    3. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
    4. https://github.com/google/automl
    """

    # Placeholder class: every method below raises NotImplementedError.
    __name__ = "EfficientNet"

    def __init__(self, in_channels: int, **config) -> None:
        super().__init__()
        raise NotImplementedError

    def forward(self):
        raise NotImplementedError

    def compute_output_shape(self):
        raise NotImplementedError

    @property
    def doi(self) -> List[str]:
        # NOTE(review): ``self.config`` is never assigned in this class (and
        # __init__ raises), so this presumably relies on a mixin or subclass
        # to provide it — confirm.  Building the list via set() also makes
        # the element order nondeterministic.
        return list(set(self.config.get("doi", []) + ["10.48550/ARXIV.1905.11946"]))
class EfficientNetV2(nn.Module, SizeMixin):
    """
    Reference
    ---------
    1. Tan, M., & Le, Q. V. (2021). Efficientnetv2: Smaller models and faster training. arXiv preprint arXiv:2104.00298.
    2. https://github.com/d-li14/efficientnetv2.pytorch/blob/main/effnetv2.py
    3. https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py
    4. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py
    5. https://github.com/google/automl
    """

    # Placeholder class: every method below raises NotImplementedError.
    # NOTE(review): unlike EfficientNet above, this class does not mix in
    # CitationMixin and has no ``doi`` property — confirm the asymmetry.
    __name__ = "EfficientNetV2"

    def __init__(self, in_channels: int, **config) -> None:
        super().__init__()
        raise NotImplementedError

    def forward(self):
        raise NotImplementedError

    def compute_output_shape(self):
        raise NotImplementedError
"wenh06@gmail.com"
] | wenh06@gmail.com |
02850738d65612e07d146e31e9908775f0fcd8a5 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayEbppInstserviceDeductSignResponse.py | c249afbff2e18df95caf317c18a1120b1533b56f | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppInstserviceDeductSignResponse(AlipayResponse):
    """Response object for alipay.ebpp.instservice.deduct.sign.

    Exposes the two payload fields returned by the gateway, ``error_code``
    and ``process_id``, parsed out of the decoded response content.
    """

    def __init__(self):
        super(AlipayEbppInstserviceDeductSignResponse, self).__init__()
        # Backing storage; populated by parse_response_content().
        self._error_code = None
        self._process_id = None

    @property
    def error_code(self):
        """Gateway error code, or None when absent from the response."""
        return self._error_code

    @error_code.setter
    def error_code(self, value):
        self._error_code = value

    @property
    def process_id(self):
        """Identifier of the sign process, or None when absent."""
        return self._process_id

    @process_id.setter
    def process_id(self, value):
        self._process_id = value

    def parse_response_content(self, response_content):
        """Decode the raw content via the parent, then copy known fields over."""
        response = super(AlipayEbppInstserviceDeductSignResponse, self).parse_response_content(response_content)
        # Only assign the keys the gateway actually returned; absent keys
        # leave the attributes at their initial None.
        for field in ('error_code', 'process_id'):
            if field in response:
                setattr(self, field, response[field])
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
b48ba99ef0bcc9fec5618036a29b6dce473fd246 | f22d31484a12d001826c1775a6f2d245a720fce8 | /Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 08/08.26 - Funções como parâmetro.py | fda15eae9a584a943ce075871ac6c0de07ce0b35 | [] | no_license | eduardoprograma/linguagem_Python | 9eb55f0a5a432a986e047b091eb7ed7152b7da67 | 942aba9146800fc33bbea98778467f837396cb93 | refs/heads/master | 2021-07-07T20:48:37.673101 | 2020-07-31T21:24:17 | 2020-07-31T21:24:17 | 159,852,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 08\08.26 - Funções como parâmetro.py
##############################################################################
def soma(a, b):
    """Return the sum of ``a`` and ``b``."""
    return a + b
def subtração(a, b):
    """Return the difference ``a - b``."""
    return a - b
def imprime(a, b, foper):
    """Apply the binary operation ``foper`` to ``a`` and ``b`` and print the result."""
    resultado = foper(a, b)
    print(resultado)
# Functions are first-class values: pass the operation to apply as an argument.
imprime(5,4, soma)
imprime(10,1, subtração)
| [
"eduardo.candido@fatec.sp.gov.br"
] | eduardo.candido@fatec.sp.gov.br |
f1abe9b49397d2ab709e03ce4589f5b19498c455 | 3b574a8d1f9cd0bde99f2e94c3c6e7b59ab46ee3 | /project/apps/core/fields.py | ac9c8184ec24e84bb7848ffeecbfdcb6a207b9c7 | [
"BSD-2-Clause"
] | permissive | barberscore/archive-api | 7ac908d2754f6fa5c387bf6e49c257424887f9b3 | d6cf8867ad60c6ae334a555881c06c71069fa12c | refs/heads/master | 2023-02-24T19:45:23.973475 | 2022-01-26T14:35:10 | 2022-01-26T14:35:10 | 202,747,844 | 0 | 0 | BSD-2-Clause | 2023-02-08T00:52:56 | 2019-08-16T15:01:15 | Python | UTF-8 | Python | false | false | 4,383 | py | import os
import string
from datetime import date
import phonenumbers
import six
import pytz
from django.db.models import EmailField, CharField, DateField
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from rest_framework_json_api import serializers
from django.contrib.postgres.fields import ArrayField
from django.forms import MultipleChoiceField
@deconstructible
class UploadPath(object):
    """Callable ``upload_to`` helper: builds
    ``<app_label>/<model_name>/<name>/<instance id>`` for a file field.

    ``@deconstructible`` lets Django migrations serialize instances.
    NOTE(review): ImageUploadPath below is a byte-for-byte duplicate of this
    class — consider keeping only one.
    """
    def __init__(self, name):
        # Sub-folder component used in the generated path.
        self.name = name
    def __call__(self, instance, filename):
        # ``filename`` (the uploaded file's original name) is ignored; the
        # stored name is just the instance id — confirm that is intended.
        return os.path.join(
            instance._meta.app_label,
            instance._meta.model_name,
            self.name,
            str(instance.id),
        )
class LowerEmailField(EmailField):
    """EmailField that lower-cases values before they are written to the DB.

    NOTE(review): a second ``class LowerEmailField`` further down this module
    (a ``from_db_value`` variant) rebinds the name at import time, so this
    definition is effectively shadowed — confirm which one is intended.
    """
    def get_prep_value(self, value):
        value = super().get_prep_value(value)
        if value is not None:
            value = value.lower()
        return value
class DivisionsField(ArrayField):
    """ArrayField whose form field is a plain MultipleChoiceField.

    The base field's choices are surfaced directly instead of ArrayField's
    default comma-separated text widget.
    """

    def formfield(self, **kwargs):
        """Build the form field, deliberately bypassing ArrayField.formfield."""
        defaults = {
            'form_class': MultipleChoiceField,
            'choices': self.base_field.choices,
        }
        defaults.update(kwargs)
        # Skip our parent's formfield implementation completely as we don't
        # care for it.
        # pylint:disable=bad-super-call
        return super(ArrayField, self).formfield(**defaults)

    def to_python(self, value):
        """Convert *value*, applying the base field's ``to_python`` per element.

        Bug fix: the original returned the untouched input ``value`` when the
        parent's conversion produced a non-list result, silently discarding
        the parent's work in ``res``.
        """
        res = super().to_python(value)
        if isinstance(res, list):
            return [self.base_field.to_python(val) for val in res]
        return res
class TimezoneField(serializers.Field):
    """Serializer field mapping timezone names to pytz timezone objects.

    NOTE(review): duplicated verbatim at the bottom of this module.  Also,
    the error raised is django.core.exceptions.ValidationError (imported at
    the top of this file), not DRF's serializers.ValidationError — confirm
    which one callers expect.
    """
    def to_representation(self, obj):
        # Render the timezone object as its string name.
        return six.text_type(obj)
    def to_internal_value(self, data):
        try:
            return pytz.timezone(str(data))
        except pytz.exceptions.UnknownTimeZoneError:
            raise ValidationError('Unknown timezone')
class ValidatedPhoneField(CharField):
    """CharField that re-renders stored phone numbers in E.164 format on load.

    Values are parsed with a US default region; unparseable values load as
    the empty string instead of raising.
    """
    def from_db_value(self, value, expression, connection):
        try:
            value = phonenumbers.parse(value, 'US')
        except phonenumbers.NumberParseException:
            return ""
        return phonenumbers.format_number(value, phonenumbers.PhoneNumberFormat.E164)
class LowerEmailField(EmailField):
    """EmailField that validates and lower-cases values loaded from the DB.

    Invalid addresses load as ``None`` instead of raising.
    NOTE(review): this redefinition silently replaces the earlier
    ``LowerEmailField`` (the ``get_prep_value`` variant) declared above —
    almost certainly an accidental duplicate; confirm and keep one.
    """
    def from_db_value(self, value, expression, connection):
        try:
            validate_email(value)
        except ValidationError:
            return None
        return value.lower()
class VoicePartField(CharField):
    """CharField that normalizes a stored voice-part name on load.

    ``from_db_value`` lower-cases and strips the raw value and returns it
    when it is one of the four known parts; anything else — an unknown
    label or a non-string such as NULL — loads as ``None``.
    """

    def from_db_value(self, value, expression, connection):
        try:
            part = value.lower().strip()
        except AttributeError:
            # Non-string value (e.g. NULL from the database).
            return None
        if part in ('tenor', 'lead', 'baritone', 'bass'):
            return part
        return None
class ReasonableBirthDate(DateField):
    """DateField that filters out two specific sentinel birth dates on load.

    NOTE(review): 1900-01-01 and 2018-11-13 are presumably placeholder values
    from a data import; rows carrying them load as ``None``.  Confirm the
    significance of the second date.
    """
    def from_db_value(self, value, expression, connection):
        if value == date(1900, 1, 1) or value == date(2018, 11, 13):
            return None
        return value
class GenderField(CharField):
    """CharField mapping stored group labels to gender values on load.

    "men" -> "male", "women" -> "female", "mixed" -> "mixed"; anything else
    — an unknown label or a non-string such as NULL — loads as ``None``.
    """

    _GENDER_MAP = {
        'men': 'male',
        'women': 'female',
        'mixed': 'mixed',
    }

    def from_db_value(self, value, expression, connection):
        try:
            normalized = value.lower()
        except AttributeError:
            # Non-string value (e.g. NULL from the database).
            return None
        return self._GENDER_MAP.get(normalized)
@deconstructible
class ImageUploadPath(object):
    """Callable ``upload_to`` helper for image fields.

    NOTE(review): byte-for-byte identical to ``UploadPath`` at the top of
    this module — consider deleting one and aliasing the other.
    """
    def __init__(self, name):
        # Sub-folder component used in the generated path.
        self.name = name
    def __call__(self, instance, filename):
        # The uploaded file's original name is ignored; the path ends in the
        # instance id.
        return os.path.join(
            instance._meta.app_label,
            instance._meta.model_name,
            self.name,
            str(instance.id),
        )
class NoPunctuationCharField(CharField):
    """CharField that strips most ASCII punctuation from values on load.

    ``None``/empty values normalize to ``""``; otherwise the listed
    punctuation characters are deleted and the result is whitespace-stripped.
    NOTE(review): apostrophe and hyphen are absent from the deletion set
    (names like "O'Brien" keep them) — confirm that omission is intentional.
    """

    # Perf fix: build the translation table once at class-definition time
    # instead of on every row load.  (str.maketrans is a static method, so
    # this is the same table the per-call version produced.)
    _PUNCTUATION_TABLE = str.maketrans('', '', '!"#$%&()*+,./:;<=>?@[\\]^_`{|}~')

    def from_db_value(self, value, expression, connection):
        if not value:
            return ""
        return value.translate(self._PUNCTUATION_TABLE).strip()
class TimezoneField(serializers.Field):
    """Exact duplicate of the TimezoneField defined earlier in this module.

    NOTE(review): this redefinition rebinds the module-level name to an
    identical class; it is redundant and can be removed — flagged only,
    since removal is a file-level change.
    """
    def to_representation(self, obj):
        return six.text_type(obj)
    def to_internal_value(self, data):
        try:
            return pytz.timezone(str(data))
        except pytz.exceptions.UnknownTimeZoneError:
            raise ValidationError('Unknown timezone')
| [
"dbinetti@gmail.com"
] | dbinetti@gmail.com |
a737f00847838c5d50695751bc37fa22764d4575 | 835db5ec0fc127df1de58a9a3af4a869a1a7cd84 | /assignments/strings/word_count.py | fc5dd5e977ac303db00540d29a365b626279f30b | [] | no_license | thorhilduranna/2020-3-T-111-PROG | 3ba097e1b54d68bdd6efbf1d7f90911a9336fa5a | c9758b61256aa6e39a3308e576c8ad0bf2b6d027 | refs/heads/master | 2023-02-09T23:39:22.879653 | 2021-01-07T12:59:19 | 2021-01-07T12:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | a_str = input("Input a string: ")
def count_letters_and_words(text):
    """Count alphanumeric characters and words in *text*.

    A "word" is a maximal run of characters for which ``str.isalpha`` or
    ``str.isdigit`` is true; every such character also counts toward the
    letter total.  (Deliberately not ``isalnum``, which would also accept
    extra Unicode numerics and change the counts.)

    Returns:
        tuple: ``(letter_count, word_count)``.
    """
    inside_word = False
    no_of_words = 0
    no_of_letters = 0
    for char in text:
        if char.isalpha() or char.isdigit():
            no_of_letters += 1
            if not inside_word:
                # First alphanumeric char after a separator starts a new word.
                no_of_words += 1
                inside_word = True
        else:
            inside_word = False
    return no_of_letters, no_of_words


if __name__ == "__main__":
    # `a_str` is read from the user by the input() line just above this block.
    no_of_letters, no_of_words = count_letters_and_words(a_str)
    print("No. of letters {}, no. of words: {}".format(no_of_letters, no_of_words))
| [
"hrafnl@gmail.com"
] | hrafnl@gmail.com |
a2ce63face177225a9ae85576b4b6a7d69f8158e | 457a71c31c5bb992616bc2c8067817436c416784 | /src/teewtme/tweets/forms.py | 8dc47f94a55be3f0ebcf51740d42dfd2d3f6e57e | [] | no_license | mahmoudzeyada/tweetme | 9a7f15e9b1c3d0d4054637ac8ad5581a0a5ee825 | 00c046b96d40061e192990b9bae76998e8f46319 | refs/heads/master | 2020-04-10T04:17:13.130415 | 2019-04-29T01:17:28 | 2019-04-29T01:17:28 | 160,793,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from django import forms
from .models import Tweet
from django.contrib.auth import get_user_model
class TweetModelForm(forms.ModelForm):
    """ModelForm for creating a Tweet from a single free-text field."""

    # Optional textarea; the label is blank so only the placeholder shows.
    content = forms.CharField(
        required=False,
        label="",
        widget=forms.Textarea(
            attrs={'placeholder': "your tweet", "class": "form-control"}
        ),
    )

    class Meta:
        model = Tweet
        fields = ['content']

    def clean_content(self, *args, **kwargs):
        """Reject the banned word; otherwise return the cleaned content.

        Bug fix: the original raised ``forms.ValidateError``, which does not
        exist in ``django.forms`` — hitting that branch crashed with
        AttributeError instead of showing a validation message.  The correct
        class is ``forms.ValidationError``.
        """
        content = self.cleaned_data.get("content")
        if content == "bitch":
            raise forms.ValidationError("you r the fqin bitch")
        return content
| [
"mahmoudzeyada440@gmail.com"
] | mahmoudzeyada440@gmail.com |
bf533929e72d3e88457deb66a33be5a79bd0fc16 | bce3601042879a059bf878a9d7972967432fc154 | /scripts/DA_join_apps_rfcd_seos_2.py | 1c60d205c037784111dcff3a28881aad218f95e7 | [] | no_license | diegoami/DSRetreat_Melbourne_DIOLEM | 8aa49d0f178651af9f0c3ed23c0155790b205160 | 7e4df7a48f650360d3e34c700e17a84b1f7511b1 | refs/heads/master | 2021-07-08T08:55:56.580343 | 2017-10-06T23:02:07 | 2017-10-07T18:11:25 | 105,751,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import pandas as pd
# Long-format inputs: per-application RFCD and SEO code percentages, plus the
# flat application table, for both the train and test splits.
seo_train = pd.read_csv('../data/train_seo_mod.csv')
rfcd_train = pd.read_csv('../data/train_rfcd_mod.csv')
app_train = pd.read_csv('../data/train_apps_mod.csv')
seo_test = pd.read_csv('../data/test_seo_mod.csv')
rfcd_test = pd.read_csv('../data/test_rfcd_mod.csv')
app_test = pd.read_csv('../data/test_apps_mod.csv')
def generate_table(seo, rfcd, app):
    """Build one wide feature table per application.

    Pivots the long-format RFCD and SEO percentage tables into one column per
    code (prefixed ``RFCD_`` / ``SEO_``), left-joins both onto the
    applications frame, fills missing percentages with 0, adds
    ``RFCD_OTHER`` / ``SEO_OTHER`` remainder columns so each family sums to
    100, and indexes the result by ``id``.

    NOTE(review): the joins key on ``app``'s positional index against the
    pivots' ``id`` index — this only lines up when application ids equal row
    positions; confirm against the input files.
    """
    rfcd_wide = rfcd.pivot(index='id', columns='RFCD.Code', values='RFCD.Percentage').fillna(0)
    seo_wide = seo.pivot(index='id', columns='SEO.Code', values='SEO.Percentage').fillna(0)

    # Prefix the code columns so the two families are distinguishable.
    rfcd_wide = rfcd_wide.rename(columns=lambda code: "RFCD_" + str(code))
    seo_wide = seo_wide.rename(columns=lambda code: "SEO_" + str(code))

    merged = app.join(rfcd_wide, how='left').fillna(0)
    merged = merged.join(seo_wide, how='left').fillna(0)

    # Remainder columns: whatever percentage is not covered by known codes.
    merged['RFCD_OTHER'] = 100 - merged[[c for c in merged.columns if c.startswith('RFCD')]].sum(axis=1)
    merged['SEO_OTHER'] = 100 - merged[[c for c in merged.columns if c.startswith('SEO')]].sum(axis=1)

    return merged.set_index('id')
# Materialize the joined wide tables for each split and write them out.
app_rfcd_seo_train = generate_table(seo_train, rfcd_train, app_train)
app_rfcd_seo_train.to_csv('../data/train_apps_rfcd_seo_mod.csv')
app_rfcd_seo_test = generate_table(seo_test, rfcd_test, app_test)
app_rfcd_seo_test.to_csv('../data/test_apps_rfcd_seo_mod.csv')
| [
"diego.amicabile@gmail.com"
] | diego.amicabile@gmail.com |
59e4b2564ddea552ed9897307d3df7b9d25bb025 | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/collective.monkeypatcher-1.0.1-py2.7.egg/collective/monkeypatcher/tests/dummypatch.py | f9a09150452fd23fb7a4f27b88e58122d022afb5 | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | # -*- coding: utf-8
# $Id: dummypatch.py 84132 2009-04-12 21:13:03Z glenfant $
"""Class, function and patch for test cases"""
class Dummy(object):
    """As said"""

    # Formatting fix: the original had `return"original"` (no space).
    # The docstrings below are part of the fixture's observable surface
    # (patch tests may compare __doc__), so they are kept verbatim.

    def someMethod(self):
        """someMethod docstring"""
        return "original"

    def patchedMethod(self):
        """patchedMethod docstring"""
        return "patched"
def someFunction(value):
    """someFunction docstring"""
    # Identity pass-through; per the module header this is a patch target
    # for the test cases.
    return value
def patchedFunction(value):
    """patchedFunction docstring"""
    # Doubles its input — presumably the replacement for someFunction, so
    # patched behaviour is observably different; confirm against the tests.
    return value * 2
class Foo(object):
    """As said"""
    # Fixture pair: someFooMethod is the target, patchedFooMethod the
    # replacement; each returns a distinct marker string.
    def someFooMethod(self):
        return "fooMethod result"
    def patchedFooMethod(self):
        return "patchedFooMethod result"
def my_appy_patch(scope, original, replacement):
    """Custom patch applicator: bind *replacement* under the name *original*
    on *scope* (a module or class).  Returns None."""
    # NOTE(review): "appy" looks like a typo for "apply", but the name is
    # likely referenced by configuration elsewhere, so it is kept.
    setattr(scope, original, replacement)
# Records every patch event delivered to the fake handler, in order.
all_patches = []


def monkeyPatchHandler(event):
    """Fake handler"""
    # Appending mutates the module-level list in place, so no `global`
    # declaration is required (it is only needed for rebinding).
    all_patches.append(event)
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
cb81bed189406073d2dd70cd655ed560a290f09f | 1f886c4ba2bd9b440da96e67f1f07d11d1a7bebc | /jsoncomment/wrapper.py | 32af3ee60e114cdbef9e127b1061647892f4203c | [] | no_license | halfbrained/cuda_spell_checker | c9c56db7f9d0b61839e37da67dab1498cdbb1911 | bee0ac8a3481e92424488bcde0e7fd2020341819 | refs/heads/master | 2023-03-16T11:23:49.375473 | 2021-03-07T20:22:22 | 2021-03-07T20:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py |
################################################################################
from types import ModuleType
################################################################################
# A Class to simulate dynamic inheritance
# Allows to change behaviour of multiple modules or classes, with the same
# interface
# Note: Class wrapping only partially tested
class GenericWrapper:
    """Delegating wrapper that simulates dynamic inheritance.

    Wraps either a module or a class instance; attribute lookups the wrapper
    cannot satisfy itself fall through to the wrapped object via
    ``__getattr__``.
    """

    def __init__(self, object_to_wrap):
        self.object_to_wrap = object_to_wrap
        if isinstance(object_to_wrap, ModuleType):
            # Modules: read names straight out of the module's namespace.
            def lookup_in_module(name):
                return self.object_to_wrap.__dict__[name]
            self._lookup = lookup_in_module
        elif isinstance(object_to_wrap, object):
            # Instances: reuse the wrapped object's own __getattr__ hook.
            # NOTE(review): everything is an `object`, so the TypeError
            # branch below is unreachable as written; kept for parity.
            self._lookup = self.object_to_wrap.__getattr__
        else:
            raise TypeError("Expected a Module or a Class Instance")

    def __getattr__(self, name):
        # Fallback lookup for names not found on the wrapper itself.
        return self._lookup(name)
################################################################################
| [
"support@uvviewsoft.com"
] | support@uvviewsoft.com |
be45c7de7915d1e3540fc93c4a9d108362a73d1c | 846c2bc8e37673476af67c7c6dd2f64faa4213f1 | /autofeat/generate_error.py | 8fb8b2112890d300b1dbd52670c2fbf5f914e6e1 | [
"MIT"
] | permissive | FelixNeutatz/LassoLarsCVBugGenerator | b759221da9e438ce5c9ed4c229a411fb10424646 | 40aa61e1164676cc4e55ae145c41304b6e5c36c0 | refs/heads/master | 2020-04-25T16:34:00.320062 | 2019-03-19T13:19:25 | 2019-03-19T13:19:25 | 172,917,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from autofeat import AutoFeatRegression
import numpy as np
import pandas as pd
import sklearn.linear_model as lm
# Load the original feature matrix and target vector.
X = np.load('data/X.npy')
target = np.load('data/y.npy')
afreg = AutoFeatRegression(n_jobs=4)
try:
    df = afreg.fit_transform(X, target)
except:
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # presumably intentional here, since this script exists to reproduce a
    # LassoLarsCV failure — autofeat dumps the offending arrays to /tmp and
    # this branch re-runs the fit on them directly to trigger the bug.
    eps = 1e-08
    X = np.load('/tmp/X_error.npy')
    target = np.load('/tmp/target_error.npy')
    reg = lm.LassoLarsCV(eps=eps)
    reg.fit(X, target)
| [
"neutatz@googlemail.com"
] | neutatz@googlemail.com |
d58edd9ba4df1a10a0c057dbc474e1f1af8907ec | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/b451bd6e6c110e9e39aef80f9b63f26a2e0ec713settings.py | b451bd6e6c110e9e39aef80f9b63f26a2e0ec713 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 522 | py | #
#
# enable giraffe's requirements and some of the giraffe apps
#
INSTALLED_APPS = (
    # ...
    # all your other apps, plus:
    'south',
    'djcelery',
    'giraffe.aggregator',
    'giraffe.publisher',
)
#
# django-celery settings:
#
# http://celeryq.org/docs/django-celery/getting-started/first-steps-with-django.html
#
# AMQP broker connection for celery.
# SECURITY NOTE(review): broker credentials are hardcoded in source — move
# them to environment variables / a secrets store before deploying.
BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USER = 'giraffe'
BROKER_PASSWORD = 'giraffe'
BROKER_VHOST = '/'
CELERY_RESULT_BACKEND = 'amqp'
# djcelery must be imported and initialised after the settings above.
import djcelery
djcelery.setup_loader()
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
f8a5912c87c8cc8130cf9cee0bde06306260708d | 0188f7dfe26a2b7f925814f202a9be653abcb859 | /prodinward/models.py | a30818056578c48db2b3b619a37bf03fb39e0f50 | [] | no_license | viralsir/Furniture_Inhouse_app | d5ac0a764681816dd63451b7d82303f1538ef2ec | d194251e93c537305d97eff968a5584a4c9de12b | refs/heads/master | 2023-06-15T16:03:07.386722 | 2021-07-08T10:43:14 | 2021-07-08T10:43:14 | 377,119,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | from django.db import models
from django.urls import reverse
from product.models import product
# Create your models here.
class prodinward(models.Model):
    """Inward (goods-received) record for a product, with pricing and billing state."""
    prod = models.ForeignKey(product, on_delete=models.CASCADE, related_name="product")
    # NOTE(review): quantity is stored as text, not a numeric field — confirm.
    quantity = models.CharField(max_length=100)
    # NOTE(review): max_length has no effect on FloatField — confirm/remove.
    rate = models.FloatField(max_length=100)
    price = models.FloatField(max_length=100)
    discount = models.FloatField(max_length=100, blank=True, null=True)
    gst = models.FloatField(max_length=100, blank=True, null=True)
    # NOTE(review): field name "is_biiled" is a typo for "is_billed";
    # renaming requires a migration, so it is only flagged here.
    is_biiled=models.BooleanField(default=False)
    billed=models.IntegerField(default=0)
    def __str__(self):
        # Displays the price only — consider including the product for clarity.
        return f"{self.price}"
def get_absolute_url(self):
return reverse("closed") | [
"viralsir2018@gmail.com"
] | viralsir2018@gmail.com |
768714cdb17e4f577667dce3bd002cfd50f0e577 | c5675cf0c2a83075e8a76ff54caa7384e4f4d554 | /mdevbox/forms.py | c8320d31b7b1db820b88db4d693ec0ff62d96072 | [] | no_license | Quantumke/mdevbox | 68969742a828e22166750c0dfb00d5d3b21f30da | 6d696423af00f7cd899e5bcfa8dc3f792de19891 | refs/heads/master | 2016-09-13T06:23:21.595605 | 2016-05-06T08:47:13 | 2016-05-06T08:47:13 | 56,508,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | from mdevbox.models import *
from django.contrib.auth.models import User
from django import forms
from .models import *
#-------------------------------------------------------------------------------------------------------------------- user auth
class developersemployment(forms.ModelForm):
    """ModelForm over developers_employment (star-imported from .models).

    NOTE(review): class name breaks PEP 8 (PascalCase expected) but renaming
    would break importers — left as-is.
    """
    class Meta:
        model = developers_employment
        fields = ('email', 'speciality', 'previous_employer', 'role_previous_employment','begin_previous_employment','end_previous_employment',)
class developerseducation(forms.ModelForm):
    """ModelForm over developers_education (star-imported from .models)."""
    class Meta:
        model=developers_education
        fields=('highest_education', 'institute_name','course','begin_education','end_education')
class developersportfolio(forms.ModelForm):
    """ModelForm over developers_portfolio (star-imported from .models)."""
    class Meta:
        model=developers_portfolio
        fields=( 'portfoli_name', 'portfoli_tech', 'portfoli_link', 'portfoli_desc',)
class hire(forms.ModelForm):
    """ModelForm for a hire request.

    NOTE(review): this form shares the name ``hire`` with the model it wraps
    (star-imported above).  ``model=hire`` below still resolves to the
    *model*, because the class body executes before this class name is
    bound — but after this definition the module-level name ``hire`` refers
    to the form.  Confirm nothing else in the project relies on the model
    name from this module.
    """
    class Meta:
        model=hire
        fields=('company','job_title','job_description', )
class postjob(forms.ModelForm):
    """ModelForm over post_job (star-imported from .models)."""
    class Meta:
        model=post_job
        fields=('company', 'job_title', 'job_description', 'pay',)
| [
"nguruben@gmail.com"
] | nguruben@gmail.com |
0522f8ff0a25769612c10267738a1c953edf88f6 | 961ddbe3e6a75339cf94679e0a7d313cd7a6c1d9 | /goods/urls.py | 3a54b874b8b147e627a2bbb7a0753d45296a6c58 | [
"Apache-2.0"
] | permissive | chinxianjun2016/GreaterWMS | 41b3a8d855f7f00f5bd91364339640f049a3b7dc | aacd0e15e0114f103eb57002e93670c008cce63b | refs/heads/master | 2023-02-07T13:24:10.313463 | 2021-01-02T01:52:41 | 2021-01-02T01:52:41 | 322,737,456 | 0 | 0 | Apache-2.0 | 2021-01-02T01:52:42 | 2020-12-19T00:50:07 | null | UTF-8 | Python | false | false | 1,112 | py | """singosgu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
    # Collection endpoint: GET lists goods, POST creates one.
    path(r'', views.APIViewSet.as_view({"get": "list", "post": "create"}), name="goods"),
    # Detail endpoint keyed by a numeric primary key.
    re_path(r'^(?P<pk>\d+)/$', views.APIViewSet.as_view({
        'get': 'retrieve',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy'
    }), name="goods_1"),
# NOTE(review): serving MEDIA via static() here is the development-only
# pattern — confirm production serves media from the web server/CDN instead.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"singosgu@gmail.com"
] | singosgu@gmail.com |
e9bf2dd1bb681537e355f54163005cd1bb9143ea | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /5.pyAI-K210/1.基础实验/4.外部中断/main.py | 5e4efe593247c0e8f7668025ad9582d34f1f0e97 | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 726 | py | '''
实验名称:外部中断
版本: v1.0
日期: 2019.12
作者: 01Studio
说明:通过按键改变 LED 的亮灭状态(外部中断方式)
'''
from Maix import GPIO
from fpioa_manager import fm
import utime
# Register the I/O pins; note that only high-speed GPIO (GPIOHS) pins support interrupts
fm.register(12, fm.fpioa.GPIO0)
fm.register(16, fm.fpioa.GPIOHS0)
# Create the LED and KEY objects
LED_B=GPIO(GPIO.GPIO0,GPIO.OUT,value=1)
KEY=GPIO(GPIO.GPIOHS0, GPIO.IN, GPIO.PULL_UP)
# LED state flag (initialised to 1, matching value=1 above)
state = 1
# Interrupt callback function
def fun(KEY):
    # NOTE(review): the parameter shadows the module-level KEY object —
    # presumably the IRQ framework passes in the triggering pin; confirm.
    global state
    utime.sleep_ms(10) # short delay to debounce the switch
    if KEY.value()==0: # confirm the key is actually pressed (pulled-up input reads 0 when pressed)
        state = not state
        LED_B.value(state)
# Enable the interrupt, triggered on the falling edge
KEY.irq(fun, GPIO.IRQ_FALLING)
| [
"237827161@qq.com"
] | 237827161@qq.com |
0aef20c455303401003a675deed4e7888a3a0865 | a86ca34e23afaf67fdf858df9e47847606b23e0c | /lib/temboo/Library/Amazon/Marketplace/Reports/MerchantListingsReport.py | f9c80bd5c6ad689f862a6e51a89af891e22ad131 | [] | no_license | miriammelnick/dont-get-mugged | 6026ad93c910baaecbc3f5477629b0322e116fa8 | 1613ee636c027ccc49c3f84a5f186e27de7f0f9d | refs/heads/master | 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,736 | py |
###############################################################################
#
# MerchantListingsReport
# Returns a tab-delimited report of active listings.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class MerchantListingsReport(Choreography):
    """
    Create a new instance of the MerchantListingsReport Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    # Generated Temboo SDK code: wires this choreography to its library path
    # and to its companion InputSet/ResultSet/Execution classes below.
    def __init__(self, temboo_session):
        Choreography.__init__(self, temboo_session, '/Library/Amazon/Marketplace/Reports/MerchantListingsReport')
    def new_input_set(self):
        return MerchantListingsReportInputSet()
    def _make_result_set(self, result, path):
        return MerchantListingsReportResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return MerchantListingsReportChoreographyExecution(session, exec_id, path)
    # NOTE(review): the bare string below describes the *next* class
    # (MerchantListingsReportInputSet) — a quirk of the code generator; it
    # sits inside this class body as an inert expression.
    """
    An InputSet with methods appropriate for specifying the inputs to the MerchantListingsReport
    choreography. The InputSet object is used to specify input parameters when executing this choreo.
    """
class MerchantListingsReportInputSet(InputSet):
    # Generated code: each setter stores one named choreography input.  The
    # bare triple-quoted strings before each method are doc text emitted by
    # the Temboo generator; the first one doubles as the class docstring.
    """
    Set the value of the AWSAccessKeyId input for this choreography. ((required, string) The Access Key ID provided by Amazon Web Services.)
    """
    def set_AWSAccessKeyId(self, value):
        InputSet._set_input(self, 'AWSAccessKeyId', value)
    """
    Set the value of the AWSMarketplaceId input for this choreography. ((required, string) The Marketplace ID provided by Amazon Web Services.)
    """
    def set_AWSMarketplaceId(self, value):
        InputSet._set_input(self, 'AWSMarketplaceId', value)
    """
    Set the value of the AWSMerchantId input for this choreography. ((required, string) The Merchant ID provided by Amazon Web Services.)
    """
    def set_AWSMerchantId(self, value):
        InputSet._set_input(self, 'AWSMerchantId', value)
    """
    Set the value of the AWSSecretKeyId input for this choreography. ((required, string) The Secret Key ID provided by Amazon Web Services.)
    """
    def set_AWSSecretKeyId(self, value):
        InputSet._set_input(self, 'AWSSecretKeyId', value)
    """
    Set the value of the Endpoint input for this choreography. ((optional, string) The base URL for the MWS endpoint. Defaults to mws.amazonservices.co.uk.)
    """
    def set_Endpoint(self, value):
        InputSet._set_input(self, 'Endpoint', value)
    """
    Set the value of the TimeToWait input for this choreography. ((optional, integer) By default, the Choreo will wait for 5 minutes to see if the report is ready for retrieval. Max is 120 minutes.)
    """
    def set_TimeToWait(self, value):
        InputSet._set_input(self, 'TimeToWait', value)
"""
A ResultSet with methods tailored to the values returned by the MerchantListingsReport choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class MerchantListingsReportResultSet(ResultSet):
"""
Retrieve the value for the "Report" output from this choreography execution. ((multiline) The report contents.)
"""
def get_Report(self):
return self._output.get('Report', None)
"""
Retrieve the value for the "GeneratedReportId" output from this choreography execution. ((integer) The GeneratedReportId parsed from the Amazon response.)
"""
def get_GeneratedReportId(self):
return self._output.get('GeneratedReportId', None)
"""
Retrieve the value for the "ReportProcessingStatus" output from this choreography execution. ((string) The status of the report request parsed from the Amazon response.)
"""
def get_ReportProcessingStatus(self):
return self._output.get('ReportProcessingStatus', None)
"""
Retrieve the value for the "ReportRequestId" output from this choreography execution. ((integer) The ReportRequestId parsed from the Amazon response. This id is used in GetReportRequestList.)
"""
def get_ReportRequestId(self):
return self._output.get('ReportRequestId', None)
class MerchantListingsReportChoreographyExecution(ChoreographyExecution):
    """ChoreographyExecution specialization that builds this choreo's typed result sets."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the MerchantListingsReport ResultSet type.
        return MerchantListingsReportResultSet(response, path)
| [
"miriam@famulus"
] | miriam@famulus |
d404e0bda811fd33c2e96dbfaa0870131e8184a9 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/speedchat/TTSCHalloweenMenu.py | 9451e1ceac323d44fae4773d6c57b1a6f0a65bf7 | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | # 2013.08.22 22:25:08 Pacific Daylight Time
# Embedded file name: toontown.speedchat.TTSCHalloweenMenu
from direct.showbase import PythonUtil
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCMenuHolder import SCMenuHolder
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from otp.otpbase import OTPLocalizer
# Menu layout: list of (section title, [phrase ids]); a section title of -1
# means "append phrases at top level without a submenu holder" (see below).
HalloweenMenu = [(OTPLocalizer.HalloweenMenuSections[0], [30250, 30251, 30252])]
class TTSCHalloweenMenu(SCMenu):
    """Halloween-event SpeedChat menu (decompiled Python 2 source)."""
    __module__ = __name__
    def __init__(self):
        SCMenu.__init__(self)
        # Populate immediately from the static HalloweenMenu table.
        self.__messagesChanged()
    def destroy(self):
        SCMenu.destroy(self)
    def clearMenu(self):
        SCMenu.clearMenu(self)
    def __messagesChanged(self):
        """Rebuild the menu from HalloweenMenu, skipping unknown phrase ids."""
        self.clearMenu()
        try:
            lt = base.localAvatar
        except:
            # NOTE(review): bare except silently bails out when no local avatar
            # exists yet -- presumably deliberate in the decompiled original.
            return
        for section in HalloweenMenu:
            if section[0] == -1:
                # Sentinel section: phrases go directly into this menu.
                for phrase in section[1]:
                    if phrase not in OTPLocalizer.SpeedChatStaticText:
                        print 'warning: tried to link Halloween phrase %s which does not seem to exist' % phrase
                        break
                    self.append(SCStaticTextTerminal(phrase))
            else:
                # Normal section: phrases are grouped under a named submenu.
                menu = SCMenu()
                for phrase in section[1]:
                    if phrase not in OTPLocalizer.SpeedChatStaticText:
                        print 'warning: tried to link Halloween phrase %s which does not seem to exist' % phrase
                        break
                    menu.append(SCStaticTextTerminal(phrase))
                menuName = str(section[0])
                self.append(SCMenuHolder(menuName, menu))
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\speedchat\TTSCHalloweenMenu.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:25:08 Pacific Daylight Time
| [
"anonymoustoontown@gmail.com"
] | anonymoustoontown@gmail.com |
5cadfcc566e5d20ec4df60ec331f7b1069d07004 | 5d3c546ed535fda1a62c2bd1a2dd36d86ea3049d | /manage.py | 55843edd0f9d4f7175279abbc27ee7621badd553 | [] | no_license | rcthomas/decals-web | 6e642082c7daf22f41f3f1ed0ea4db6f74bed682 | 64467865227d33124f42febdbd09e49e64e0dbfa | refs/heads/master | 2021-05-10T15:46:42.048404 | 2018-01-22T20:06:09 | 2018-01-22T20:06:09 | 118,561,087 | 0 | 0 | null | 2018-01-23T05:13:27 | 2018-01-23T05:13:26 | null | UTF-8 | Python | false | false | 249 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "decals.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"dstndstn@gmail.com"
] | dstndstn@gmail.com |
05391db40e348f679bd0fb7f3aff7fb9312e86dc | 6515dee87efbc5edfbf4c117e262449999fcbb50 | /cn/437.py | 9db648c3425be2e1fcd9d392cba20e26e2e594f9 | [] | no_license | wangyunge/algorithmpractice | 24edca77e180854b509954dd0c5d4074e0e9ef31 | 085b8dfa8e12f7c39107bab60110cd3b182f0c13 | refs/heads/master | 2021-12-29T12:55:38.096584 | 2021-12-12T02:53:43 | 2021-12-12T02:53:43 | 62,696,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def pathSum(self, root, targetSum):
        """
        :type root: TreeNode
        :type targetSum: int
        :rtype: int
        """
        # Prefix-sum walk: at every node, count how many ancestor prefixes
        # differ from the running sum by exactly targetSum.
        prefix_counts = {0: 1}  # empty prefix, so root-anchored paths count

        def walk(node, running):
            if node is None:
                return 0
            running += node.val
            found = prefix_counts.get(running - targetSum, 0)
            prefix_counts[running] = prefix_counts.get(running, 0) + 1
            found += walk(node.left, running)
            found += walk(node.right, running)
            prefix_counts[running] -= 1  # backtrack on the way up
            return found

        return walk(root, 0)
| [
"wangyunge1@yahoo.com"
] | wangyunge1@yahoo.com |
2ce8806bfe4f7d6fc569bc79af8db82d2fc93e01 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_083/ch87_2020_06_22_18_11_23_637038.py | a6ce84b797d6cb26843f6f35d6d9f2edd6852dc2 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | preco_total = 0
# Sum the cost of every line of churras.txt; each line is "item,quantity,unit_price".
with open('churras.txt', 'r') as arquivo:
    for linha in arquivo.readlines():
        # Bug fix: the original called .strip() on the whole line list
        # (AttributeError) and indexed the unsplit string instead of the
        # split fields.
        campos = linha.strip().split(',')
        preco_total += int(campos[1]) * float(campos[2])
print(preco_total)
"you@example.com"
] | you@example.com |
6e84f34b20b277f4cc2f6eed0c7053350f95dd5d | 0f8bb3285ae796ad0c000fb7f0d897bf9d92aef3 | /prepare_data.py | 0fc88eeb4834c5c2a6e425d93e78f9c45049e96b | [
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | viuts/stylegan2-pytorch | 3e23479ec53d5d8f39fcae6f5b95c2bb1f005a88 | 2fdd10b3e9e3261390032963c59646cd298a60b1 | refs/heads/master | 2020-12-04T22:04:35.950846 | 2020-01-11T05:08:35 | 2020-01-11T05:08:35 | 231,915,562 | 0 | 0 | NOASSERTION | 2020-01-05T12:40:25 | 2020-01-05T12:40:24 | null | UTF-8 | Python | false | false | 2,189 | py | import argparse
from io import BytesIO
import multiprocessing
from functools import partial
from PIL import Image
import lmdb
from tqdm import tqdm
from torchvision import datasets
from torchvision.transforms import functional as trans_fn
def resize_and_convert(img, size, quality=100):
    """Resize *img* with Lanczos, center-crop to a *size* square, return JPEG bytes."""
    resized = trans_fn.resize(img, size, Image.LANCZOS)
    cropped = trans_fn.center_crop(resized, size)
    out = BytesIO()
    cropped.save(out, format='jpeg', quality=quality)
    return out.getvalue()
def resize_multiple(img, sizes=(128, 256, 512, 1024), quality=100):
    """Return a list of JPEG byte strings, one per entry in *sizes*."""
    return [resize_and_convert(img, size, quality) for size in sizes]
def resize_worker(img_file, sizes):
    """Pool worker: load the (index, path) pair, convert to RGB, resize at every size.

    Returns (index, [jpeg bytes per size]) so results can be matched up again
    after unordered parallel processing.
    """
    index, path = img_file
    image = Image.open(path).convert('RGB')
    return index, resize_multiple(image, sizes=sizes)
def prepare(transaction, dataset, n_worker, sizes=(128, 256, 512, 1024)):
    """Resize every dataset image at each size and store the JPEGs in the LMDB transaction.

    Keys are "<size>-<zero-padded index>"; a final "length" key records the
    number of images written.
    """
    worker = partial(resize_worker, sizes=sizes)

    indexed = [(index, path)
               for index, (path, label) in enumerate(sorted(dataset.imgs, key=lambda x: x[0]))]

    total = 0
    with multiprocessing.Pool(n_worker) as pool:
        for index, jpegs in tqdm(pool.imap_unordered(worker, indexed)):
            for size, jpeg in zip(sizes, jpegs):
                transaction.put(f'{size}-{str(index).zfill(5)}'.encode('utf-8'), jpeg)
            total += 1

    transaction.put('length'.encode('utf-8'), str(total).encode('utf-8'))
if __name__ == '__main__':
    # CLI: prepare_data.py [--out LMDB_DIR] [--size CSV] [--n_worker N] IMAGE_DIR
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', type=str)
    parser.add_argument('--size', type=str, default='128,256,512,1024')
    parser.add_argument('--n_worker', type=int, default=8)
    parser.add_argument('path', type=str)
    args = parser.parse_args()
    # Parse the comma-separated size list, e.g. "128,256" -> [128, 256].
    sizes = [int(s.strip()) for s in args.size.split(',')]
    print(f'Make dataset of image sizes:', ', '.join(str(s) for s in sizes))
    imgset = datasets.ImageFolder(args.path)
    # map_size of 1 TiB reserves address space only; the LMDB file grows as needed.
    with lmdb.open(args.out, map_size=1024 ** 4, readahead=False) as env:
        with env.begin(write=True) as txn:
            prepare(txn, imgset, args.n_worker, sizes=sizes)
| [
"kim.seonghyeon@snu.ac.kr"
] | kim.seonghyeon@snu.ac.kr |
77bed12fb92ed9a4be75f7a3e3e79b0e4a1560f2 | 9ba2b89dbdeefa54c6b6935d772ce36be7b05292 | /devilry/devilry_group/migrations/0002_feedbackset_gradeform_json.py | 61b10f5028325b2b42f1b43c33e35c9c77ae6cf1 | [] | no_license | kristtuv/devilry-django | 0ffcd9d2005cad5e51f6377484a83d778d65050f | dd2a4e5a887b28268f3a45cc3b25a40c0e313fd3 | refs/heads/master | 2020-04-27T06:02:45.518765 | 2019-02-15T13:28:20 | 2019-02-15T13:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable ``gradeform_json`` text field to ``FeedbackSet``."""
    dependencies = [
        ('devilry_group', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='feedbackset',
            name='gradeform_json',
            # Nullable and blankable so existing rows need no data migration.
            field=models.TextField(null=True, blank=True),
        ),
    ]
| [
"stianjul@gmail.com"
] | stianjul@gmail.com |
75ceb6fdf8315b35a20773ebbf24f3381cb9ae67 | 1e4aef2d451a97a8aafb4e1cddfa4ebb0cc309cc | /first_step/shopping_cart/shopping_cart.py | 7665887eb831ea8e6b30c1d9609e1458235dfea7 | [] | no_license | FleeaniCh/python | 4c40b5d9d711796cee49a7b95bb7f64fa9257662 | 129a759212a8221de1a3d9c1cb3b66c6fece5a63 | refs/heads/master | 2023-03-12T06:40:05.120123 | 2021-02-28T02:03:34 | 2021-02-28T02:03:34 | 342,999,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py | dict_commodity_info = {
101: {"name": "屠龙刀", "price": 10000},
102: {"name": "倚天剑", "price": 10000},
103: {"name": "九阴白骨爪", "price": 8000},
104: {"name": "九阳神功", "price": 9000},
105: {"name": "降龙十八掌", "price": 8000},
106: {"name": "乾坤大挪移", "price": 10000}
}
# Cart: one {"cid": ..., "count": ...} entry per order line.
list_order = []
def select_menu():
    """Main loop: prompt for an action (1 = buy, 2 = checkout) forever."""
    handlers = {"1": buying, "2": settlement}
    while True:
        choice = input("1键购买,2键结算。")
        action = handlers.get(choice)
        if action is not None:
            action()
def settlement():
    """Checkout: compute the cart total, then collect payment for it."""
    paying(calculate_total_price())
def paying(total_price):
    """Prompt for money until the amount covers *total_price*, then clear the cart.

    :param total_price: amount due
    """
    while True:
        tendered = float(input("总价%d元,请输入金额:" % total_price))
        if tendered < total_price:
            print("金额不足.")
            continue
        print("购买成功,找回:%d元。" % (tendered - total_price))
        list_order.clear()
        return
def calculate_total_price():
    """Print each ordered line and return the cart's total price."""
    total = 0
    for entry in list_order:
        info = dict_commodity_info[entry["cid"]]
        cantidad = entry["count"]
        print("商品:%s,单价:%d,数量:%d." % (info["name"], info["price"], cantidad))
        total += info["price"] * cantidad
    return total
def buying():
    """One purchase flow: show the catalogue, then record an order line."""
    print_commodity_info()
    create_order()
    print("添加到购物车。")
def create_order():
    """Ask for a commodity id and quantity, then append the order line to the cart."""
    cid = input_commodity_id()
    count = int(input("请输入购买数量:"))
    list_order.append({"cid": cid, "count": count})
def input_commodity_id():
    """Prompt until the user enters an id present in the catalogue; return it."""
    while True:
        cid = int(input("请输入商品编号:"))
        if cid in dict_commodity_info:
            return cid
        print("该商品不存在")
def print_commodity_info():
    """Print one catalogue line per commodity (id, name, unit price)."""
    for cid, info in dict_commodity_info.items():
        print("编号:%d,名称:%s,单价:%d。" % (cid, info["name"], info["price"]))
# Start the interactive shopping loop.
select_menu()
| [
"17354108830@163.com"
] | 17354108830@163.com |
e487f15efe7b9383e9f8e34e2b22deb1e0ffbaea | 7ea1beb4e0442cc494b53700a9494c4eb05ad9bb | /flaskboot/ConfigProject/config.py | 54a8a203ae0f7654037344553388184142909fd3 | [] | no_license | liwei123o0/FlaskBBS | 9fd26bcd133a16d57903cbcd5d66c412babb3a1a | 92360a7d8bf8667d314ca6d0839323346f314bf8 | refs/heads/master | 2020-11-26T16:33:02.668878 | 2015-09-28T08:48:14 | 2015-09-28T08:48:14 | 42,153,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # -*- coding: utf-8 -*-
#! /usr/bin/env python
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by all environments."""
    # NOTE(review): secret key is hardcoded and committed -- should come from
    # an environment variable in anything beyond a toy project.
    SECRET_KEY = 'liweiCDK'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    FLASK_MAIL_SUBJECT_PREFIX = '[ConfigProject]'
    FLASK_MAIL_SENDER = 'liweijavakf@163.com'
    FLASK_ADMIN = 'liwei'
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialization; base does nothing.
        pass
class DevelopmentConfig(Config):
    """Development settings: debug on, SMTP mail, local MySQL database."""
    DEBUG = True
    MAIL_SERVER = 'smtp.163.com'
    MAIL_PORT = 25
    MAIL_USE_TLS = True
    # NOTE(review): real mail credentials committed to source control --
    # move these to environment variables.
    MAIL_USERNAME = 'liweijavakf@163.com'
    MAIL_PASSWORD = 'liwei429'
    SQLALCHEMY_DATABASE_URI = 'mysql://root:root@127.0.0.1/flask'
class TestingConfig(Config):
    """Testing settings: TESTING flag on, throwaway SQLite database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'configdata.sqlite')
class ProductionConfig(Config):
    """Production settings (currently identical SQLite file as testing)."""
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'configdata.sqlite')
# Registry used by the app factory to pick a configuration by name.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig,
}
| [
"877129310@qq.com"
] | 877129310@qq.com |
885459dea8753a0fa00de87c86459898fdf2ae8c | 491f9ca49bbb275c99248134c604da9fb43ee9fe | /P4_Mini_Project_NEURON/CaHVA_Allen/SK_Allen/plot_caproperties.py | 341397e1b2367ed75c37814b6a32e6e17ed72645 | [] | no_license | KineOdegardHanssen/PhD-subprojects | 9ef0facf7da4b2a80b4bea9c890aa04f0ddcfd1a | c275539689b53b94cbb85c0fdb3cea5885fc40e9 | refs/heads/Windows | 2023-06-08T13:32:15.179813 | 2023-06-05T08:40:10 | 2023-06-05T08:40:10 | 195,783,664 | 2 | 0 | null | 2020-08-18T14:42:21 | 2019-07-08T09:49:14 | Python | UTF-8 | Python | false | false | 4,369 | py | import os
from os.path import join
import sys
import matplotlib.pyplot as plt
import json
import neuron
import time as tm
import numpy as np
from matplotlib import gridspec
# --- Simulation parameters; must match the run that produced the data files ---
iamp = 0.006
idur = 1000
dtexp = -7
v_init = -86
somasize = 10
cm_factor = 1.0
t_before_rec = -600.0
# [Ca]_in reference value drawn as a dashed line on the [Ca]in panel below
# (named "half open" -- presumably a channel-gating threshold; confirm).
conc_at_halfopen = 0.00043287612810830614
gcahva = 0.2
gsk = 1.0
# Conductance tag embedded in result file names, e.g. "_gSK1.0p_gCaHVA0.2p_".
namestring = ''
namestring = namestring + '_gSK'+str(gsk)+'p'
namestring = namestring + '_gCaHVA'+str(gcahva)+'p'
namestring = namestring +'_'
folder = 'Results/Soma%i/current_idur%i_iamp'%(somasize,idur)+str(iamp)+'/'
if os.path.exists(folder)==False:
    os.mkdir(folder)
#t, v, eca, cai, cao
filename = folder+namestring+'somaonly_cm'+str(cm_factor)+'_idur%i_iamp'%idur+str(iamp)+'_dtexp%i_vinit' % dtexp+str(v_init)+'_trec'+str(t_before_rec)+'_V_eca.txt'
data = np.loadtxt(filename)
# Column layout of the recording file (one row per time step):
t = data[:, 0]
v = data[:, 1]
eca = data[:, 2]
cai = data[:, 3]
cao = data[:, 4]
I_SK = data[:, 5]
I_Ca_HVA = data[:, 6]
g_SK = data[:, 7]
g_Ca_HVA = data[:, 8]
# 4x2 panel grid: ax1-ax4 occupy the left half (grid cols 0-1), ax5-ax8 the right.
#fig, (ax1, ax2, ax3) = plt.subplots(3,1,figsize=(5,11),dpi=300)
fig = plt.figure(figsize=(10,8),dpi=300)#(figsize=(8,3),dpi=300)
gs = gridspec.GridSpec(4, 4)
ax1 = plt.subplot(gs[0, 0:2])
ax2 = plt.subplot(gs[1, 0:2])
ax3 = plt.subplot(gs[2, 0:2])
ax4 = plt.subplot(gs[3, 0:2])
ax5 = plt.subplot(gs[0, 2:4])
ax6 = plt.subplot(gs[1, 2:4])
ax7 = plt.subplot(gs[2, 2:4])
ax8 = plt.subplot(gs[3, 2:4])
# Dotted verticals at t=100 and t=1100 ms mark the same two time points on all
# panels (presumably stimulus onset/offset given idur=1000 -- confirm).
# Panel: membrane potential trace.
ax1.plot(t,v)
ax1.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax1.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
#ax1.set_xlabel('V (mV)')
ax1.set_ylabel(r'$V$ (mV)',fontsize=12)
ax1.set_title(r'$I=$ %s nA' % str(iamp),fontsize=16)
ax1.set_title('A', loc='left',fontsize=18)
# Panel: calcium reversal potential.
# NOTE(review): corner label 'A' repeats ax1's; later panels run B..G --
# looks like a copy-paste leftover.
ax2.plot(t,eca,color='k')
ax2.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax2.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
#ax1.set_xlabel('V (mV)')
ax2.set_ylabel(r'$E_\mathregular{Ca}$',fontsize=12)
ax2.set_title(r'$E_\mathregular{Ca}$',fontsize=16)
ax2.set_title('A', loc='left',fontsize=18)
# Panel: intracellular calcium, with the half-open reference concentration dashed.
ax3.plot(t,cai,color='tab:brown')
ax3.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax3.axhline(y=conc_at_halfopen,color='k',linestyle='--',linewidth=0.75)
ax3.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
#ax2.set_xlabel('t (ms)',fontsize=12)
ax3.set_ylabel(r'Concentration (mM)',fontsize=12)
ax3.set_title(r'$\left[\mathregular{Ca}^{2+}\right]_\mathregular{in}$',fontsize=16)
ax3.set_title('B', loc='left',fontsize=18)
# Panel: extracellular calcium.
ax4.plot(t,cao,color='tab:brown')
ax4.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax4.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax4.set_xlabel(r'$t$ (ms)',fontsize=12)
ax4.set_ylabel(r'Concentration (mM)',fontsize=12)
ax4.set_title(r'$\left[\mathregular{Ca}^{2+}\right]_\mathregular{out}$',fontsize=16)
ax4.set_title('C', loc='left',fontsize=18)
# Panel: SK current.
ax5.plot(t,I_SK,color='tab:gray')
#ax1.set_xlabel('V (mV)')
ax5.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax5.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax5.set_ylabel(r'$I_\mathregular{SK}$ (nA)',fontsize=12)
ax5.set_title(r'$I_\mathregular{SK}$',fontsize=16)
ax5.set_title('D', loc='left',fontsize=18)
# Panel: high-voltage-activated Ca current.
ax6.plot(t,I_Ca_HVA,color='tab:gray')
#ax1.set_xlabel('V (mV)')
ax6.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax6.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax6.set_ylabel(r'$I_\mathregular{CaHVA}$ (nA)',fontsize=12)
ax6.set_title(r'$I_\mathregular{CaHVA}$',fontsize=16)
ax6.set_title('E', loc='left',fontsize=18)
# Panel: SK conductance.
ax7.plot(t,g_SK,color='tab:purple')
#ax1.set_xlabel('V (mV)')
ax7.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax7.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax7.set_ylabel(r'$g_\mathregular{SK}$ (S/cm$^2$)',fontsize=12)
ax7.set_title(r'$g_\mathregular{SK}$',fontsize=16)
ax7.set_title('F', loc='left',fontsize=18)
# Panel: CaHVA conductance.
ax8.plot(t,g_Ca_HVA,color='tab:purple')
#ax1.set_xlabel('V (mV)')
ax8.axvline(x=100,color='k',linestyle=':',linewidth=0.75)
ax8.axvline(x=1100,color='k',linestyle=':',linewidth=0.75)
ax8.set_xlabel(r'$t$ (ms)',fontsize=12)
ax8.set_ylabel(r'$g_\mathregular{CaHVA}$ (S/cm$^2$)',fontsize=12)
ax8.set_title(r'$g_\mathregular{CaHVA}$',fontsize=16)
ax8.set_title('G', loc='left',fontsize=18)
plt.tight_layout()
plt.savefig('Results/Soma%i/Ca-properties/Ca_E_and_concentrations_iamp'%somasize+str(iamp)+'_idur'+str(idur)+'.png')
plt.show()
| [
"noreply@github.com"
] | KineOdegardHanssen.noreply@github.com |
8632d96a8605aa4d7038e5a4711dc6e00361121a | e3765def4a180f1d51eaef3884448b0bb9be2cd3 | /example/09.4.6_nested_moudle/my_car.py | 01d9bc8bea5bdf6af985b22f91b6c3e5552edeab | [] | no_license | spearfish/python-crash-course | cbeb254efdf0c1ab37d8a7d2fa0409194f19fa2b | 66bc42d41395cc365e066a597380a96d3282d30b | refs/heads/master | 2023-07-14T11:04:49.276764 | 2021-08-20T10:02:27 | 2021-08-20T10:02:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | #!/usr/bin/env python3
from electric_car import ElectricCar
tesla = ElectricCar('tesla', 'model s', 2020)
print(tesla.get_descriptive_name())
| [
"jingchen@tutanota.com"
] | jingchen@tutanota.com |
a9475d4d9623e92ea5a6ec88961e95168f7ea56e | 50db76c3c6f1d56d454d9d8411f2c7969ce906a8 | /scrapeNews/scrapeNews/spiders/oneindiaHindi.py | 86d4846bda58de2ba9506653c15dad404951b9cf | [] | no_license | anujaagarwal/scrape | 4199ec4ea353235b8f9e254215210a3783480365 | 6c2e70920b40bb99f7fe287f8dce8179d68cad99 | refs/heads/master | 2021-09-04T14:43:54.752275 | 2018-01-19T15:53:05 | 2018-01-19T15:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapeNews.items import ScrapenewsItem
import logging
loggerError = logging.getLogger("scrapeNewsError")
class OneindiahindiSpider(scrapy.Spider):
    """Spider that scrapes India-section news articles from hindi.oneindia.com."""
    name = 'oneindiaHindi'
    allowed_domains = ['oneindia.com']

    def __init__(self, pages=4, *args, **kwargs):
        """Queue the first *pages* listing pages of the India news section."""
        super(OneindiahindiSpider, self).__init__(*args, **kwargs)
        for count in range(1, int(pages) + 1):
            self.start_urls.append('https://hindi.oneindia.com/news/india/?page-no=' + str(count))

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url, self.parse)

    def parse(self, response):
        """Follow every article link found on a listing page."""
        newsContainer = response.xpath('//div[@id="collection-wrapper"]/article')
        for newsBox in newsContainer:
            link = 'https://hindi.oneindia.com/news/india/' + newsBox.xpath('div/h2/a/@href').extract_first()
            yield scrapy.Request(url=link, callback=self.parse_article)

    def parse_article(self, response):
        """Build a ScrapenewsItem from an article page; yield it unless every field failed."""
        item = ScrapenewsItem()  # Scraper Items
        item['image'] = self.getPageImage(response)
        item['title'] = self.getPageTitle(response)
        item['content'] = self.getPageContent(response)
        item['newsDate'] = self.getPageDate(response)
        item['link'] = response.url
        item['source'] = 110
        # Bug fix: the original used "is not 'Error'", an identity test that is
        # unreliable for strings; compare by value instead.
        if item['image'] != 'Error' or item['title'] != 'Error' or item['content'] != 'Error' or item['newsDate'] != 'Error':
            yield item

    def getPageContent(self, response):
        """Return the first ~40 words of the article body, or 'Error'."""
        try:
            data = ' '.join((''.join(response.xpath("//div[contains(@class,'io-article-body')]/p/text()").extract())).split(' ')[:40])
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            loggerError.error(response.url)
            data = 'Error'
        return data

    def getPageTitle(self, response):
        """Return the article headline, or 'Error'."""
        data = response.xpath("//h1[contains(@class,'heading')]/text()").extract_first()
        if (data is None):
            loggerError.error(response.url)
            data = 'Error'
        return data

    def getPageImage(self, response):
        """Return the absolute article image URL, or 'Error' if none was found."""
        # Bug fix: the original concatenated the domain BEFORE the None check,
        # so a missing image raised TypeError instead of being logged.
        src = response.xpath("//img[contains(@class,'image_listical')]/@src").extract_first()
        if src is None:
            # Lazily-loaded images keep the real URL in a pagespeed attribute.
            src = response.xpath("//img[contains(@class,'image_listical')]/@data-pagespeed-lazy-src").extract_first()
        if src is None:
            loggerError.error(response.url)
            return 'Error'
        return 'https://hindi.oneindia.com' + src

    def getPageDate(self, response):
        """Return the publication timestamp with the timezone offset stripped, or 'Error'."""
        try:
            data = (response.xpath("//time/@datetime").extract_first()).rsplit('+', 1)[0]
        except Exception as Error:
            loggerError.error(str(Error) + ' occured at: ' + response.url)
            data = 'Error'
        # Moved out of "finally": a return inside finally silently discards
        # any in-flight exception.
        return data
| [
"ajay39in@gmail.com"
] | ajay39in@gmail.com |
ea63686d38514e03cb78a5213fb4e6ce1e1402d7 | 677c21c723a6d6003f8e8804bbd98d42992301c9 | /oz/plugins/json_api/options.py | 896060d8262e74682a27fd63e5da9c1f9c88436a | [] | no_license | xoxoj/oz | ebd95f53f6a34ac8c7e9f2210411d852492328b3 | 70b6b64c87c9f06e6edce2736b4c7d1394ca5cb5 | refs/heads/master | 2021-01-20T16:34:32.050599 | 2014-08-28T20:11:07 | 2014-08-28T20:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
import oz
oz.option("allow_jsonp", type=bool, default=True, help="Whether to allow for JSONP requests")
| [
"simonson@gmail.com"
] | simonson@gmail.com |
000de5eb783d8e4b5a0eaca28d77da1405077226 | 20860030d52b5be62cb797e396a5a6b83f45dc44 | /bravehub_shared/utils/dynamic_object.py | 8ca5b1b1dbcae64bd938af10720c6743fdebedf5 | [] | no_license | rcosnita/bravehub | 189d30c46224dd80d6fbf41c50a33559ec2f44ae | 960bcfdb3c2e53e81aa75f7a48980e4918cfd4bb | refs/heads/master | 2022-12-21T11:28:16.626690 | 2018-02-17T10:43:09 | 2018-02-17T10:43:09 | 98,259,347 | 0 | 1 | null | 2022-12-19T13:27:11 | 2017-07-25T03:17:44 | Python | UTF-8 | Python | false | false | 514 | py | """Provides a very simple implementation which creates dynamic objects starting from a dictionary.
"""
class DynamicObject(dict):
  """Dictionary subclass whose keys are also readable as attributes.

  Nested dictionaries are wrapped on access so chained attribute lookups work;
  missing keys read as None.
  """

  def __init__(self, data=None):
    super().__init__()
    self._data = data or {}
    self.update(self._data)

  def __getattr__(self, name):
    # Only reached when normal attribute lookup fails, i.e. for dict keys.
    entry = super().get(name)
    return DynamicObject(entry) if isinstance(entry, dict) else entry
| [
"radu.cosnita@gmail.com"
] | radu.cosnita@gmail.com |
c267fba6099b86c7555a9dd5b18cd541c76b8015 | 2da6c42227de4c414dffa9dfd2da97862847e147 | /Algorismica avançada - UB/Pau/(ALGA)Algorismica avançada/(ALGA)Algorismica avançada/cristhian/practicas/p3/ex3_CarmonaTorresCristhian.py | 626de3d785184d8fadc5c76a0613b49a6de32bff | [] | no_license | fitigf15/PYTHON-VICTOR | 9aa4d0233532d5a58c4c9ec9ca02f069a5a5c2cc | 864ee3136839f2d507efae5c18a455a9f392020f | refs/heads/master | 2020-03-30T19:17:33.179271 | 2015-04-15T19:18:18 | 2015-04-15T19:18:18 | 24,456,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,478 | py | # Cristhian Carmona Torres
def leerFichero(nombre):
    '''Read puzzle file *nombre* into the global 9x9 matrix ``ma``.

    A '.' marks an empty cell (stored as 0); digit characters become ints.
    '''
    global ma
    entrada = open(nombre, "r")
    lineas = entrada.readlines()
    entrada.close()
    ma = [[0] * 9 for _ in range(9)]
    for fila in range(len(lineas)):
        for columna in range(9):
            simbolo = lineas[fila][columna]
            ma[fila][columna] = 0 if simbolo == '.' else int(simbolo)
def sudokuVer(i, j, sol, inicial):
    '''Backtracking solver: fill sol in place, printing it once complete.'''
    if (i==9) and (j == 0):
        # Walked past the last row: every cell is placed consistently.
        print '***SUDOKU RESUELTO***'
        for fi in sol:
            print fi
    else:
        # Only search when the cell was empty in the initial puzzle.
        if not(inicial[i][j]):
            # Try every candidate digit 1..9 in this cell.
            for k in range(1,10): # O(n)
                sol[i][j] = k # place the candidate
                if satisfact(i, j, sol): # keep it only if consistent
                    if (j == 8): # end of the row: wrap to column 0 of the next row...
                        sudokuVer(i+1, 0, sol, inicial)
                    else: # ...otherwise advance to the next column
                        sudokuVer(i, j+1, sol, inicial)
            sol[i][j] = 0 # backtrack: restore the cell before returning
        else: # cell was given in the initial puzzle: just move on
            if (j == 8):
                sudokuVer(i+1, 0, sol, inicial)
            else:
                sudokuVer(i, j+1, sol, inicial)
    return False
def sudoku(sol):
    ''' Solve the initial sudoku in *sol*, printing the puzzle and its solution.'''
    # Auxiliary boolean matrix: True marks cells given in the initial puzzle
    # (those must not be changed by the search).
    inicial=[[False]*9 for x in xrange(9)]
    print '***INICIAL SIN RESOLVER***'
    for a in sol:
        print a
    # A cell is "given" exactly when it holds a nonzero value.
    for i in range(9): # O(1)
        for j in range(9):
            inicial[i][j] = sol[i][j]!=0
    # Run the backtracking solver from the top-left cell.
    sudokuVer(0, 0, sol, inicial)
def satisfact(i, j, sol):
    '''Return True if the value at sol[i][j] does not clash with its column,
    row, or 3x3 region.

    The region origin is computed inline with floor division: the original
    delegated to obtieneRegion, whose "/" arithmetic only worked under
    Python 2 integer division (under Python 3 it always yielded region 0).
    '''
    candidato = sol[i][j]
    # Column check: any other row holding the same value in column j?
    for k in range(9):
        if k != i and sol[k][j] == candidato:
            return False
    # Row check: any other column holding the same value in row i?
    for l in range(9):
        if l != j and sol[i][l] == candidato:
            return False
    # Region check; cells sharing the row or column were already handled above,
    # so (as in the original) only cells differing in BOTH indices are tested.
    base_f = 3 * (i // 3)
    base_c = 3 * (j // 3)
    for f in range(base_f, base_f + 3):
        for c in range(base_c, base_c + 3):
            if f != i and c != j and sol[f][c] == candidato:
                return False
    return True
def obtieneRegion(i):
    ''' Return the starting index (0, 3 or 6) of the 3x3 region containing index *i*.'''
    # Bug fix: the original "(i+3)/3" arithmetic relied on Python 2 integer
    # division; under Python 3 it produced floats that matched no case and
    # always returned 0. Explicit floor division is equivalent under Python 2
    # and correct under Python 3.
    return 3 * (i // 3)
#----Codigo a ejecutar-----
leerFichero("sudoku1.txt")
sudoku(ma)
'''
sudo1 = [[1,2,0,0,5,0,7,0,0],
[8,4,0,3,7,0,5,0,1],
[9,0,0,0,4,2,0,6,8],
[5,0,8,0,2,0,9,0,0],
[6,0,2,8,3,0,1,5,4],
[7,3,4,0,1,0,0,8,0],
[0,0,0,0,0,0,4,0,0],
[4,6,1,0,0,3,8,7,5],
[3,5,9,0,8,4,6,1,0]]
-----------------------------
SOLUCION SUDOKU 1
[1,2,3,6,5,8,7,4,9]
[8,4,6,3,7,9,5,2,1]
[9,7,5,1,4,2,3,6,8]
[5,1,8,4,2,6,9,3,7]
[6,9,2,8,3,7,1,5,4]
[7,3,4,9,1,5,2,8,6]
[2,8,7,5,6,1,4,9,3]
[4,6,1,2,9,3,8,7,5]
[3,5,9,7,8,4,6,1,2]
'''
| [
"fiti.gf15@gmail.com"
] | fiti.gf15@gmail.com |
a9028b7299ff6a4d6d81002e6ae5b4392b63bfe8 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mt4E3MYkoJASY8TE6_5.py | 3d585d3deedacfff68f630c5e47a071f21f443c9 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py |
from itertools import product
def adjacent(key):
return {
'0' : ('0', '8'),
'1' : ('1', '2', '4'),
'2' : ('1', '2', '3', '5'),
'3' : ('2', '3', '6'),
'4' : ('1', '4', '5', '7'),
'5' : ('2', '4', '5', '6', '8'),
'6' : ('3', '5', '6', '9'),
'7' : ('4', '7', '8'),
'8' : ('0', '5', '7', '8', '9'),
'9' : ('6', '8', '9')
}[key]
def crack_pincode(pincode):
codes = product(*map(adjacent, pincode))
return [''.join(code) for code in codes]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0baa3f08c72a3132e3304442ac9fc9bf099e582e | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008-EOL/desktop/kde/autostart/actions.py | 5576e5aca75b62900a5da327c29d6bfaba4e4595 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import kde
from pisi.actionsapi import pisitools
def setup():
    """Regenerate the build system and configure the KDE3 source tree."""
    kde.make("-f admin/Makefile.common")
    kde.configure()
def build():
    """Compile with KDE's make wrapper."""
    kde.make()
def install():
    """Install into the package root and drop the duplicate desktop entry."""
    kde.install()
    pisitools.remove("/usr/kde/3.5/share/applications/kde/autostart.desktop")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
25642237bc42a97e075903094c6cd7f7410f8680 | b99ccdd014aaa364053f8aaec062963efc0d0e71 | /reconfigure/configs/squid.py | bfd164219cd08ffc4e3fae97dbb7e3a83465538b | [] | no_license | Eugeny/reconfigure | c6eac546b9b50aaf33290f8cedf61dd55c77e9a3 | ff1115dede4b80222a2618d0e7657cafa36a2573 | refs/heads/master | 2020-12-24T17:55:04.489144 | 2020-11-25T16:10:28 | 2020-11-25T16:10:28 | 5,739,146 | 63 | 18 | null | 2020-09-03T14:15:22 | 2012-09-09T16:14:15 | Python | UTF-8 | Python | false | false | 419 | py | from reconfigure.configs.base import Reconfig
from reconfigure.parsers import SquidParser
from reconfigure.builders import BoundBuilder
from reconfigure.items.squid import SquidData
class SquidConfig (Reconfig):
    """Reconfig preconfigured with the Squid parser and bound data builder."""
    def __init__(self, **kwargs):
        # Caller-supplied keyword arguments override these defaults.
        defaults = {
            'parser': SquidParser(),
            'builder': BoundBuilder(SquidData),
        }
        defaults.update(kwargs)
        Reconfig.__init__(self, **defaults)
| [
"e@ajenti.org"
] | e@ajenti.org |
71fab5d2fe8bbbe019765dfe164587d5dc579854 | 8259dd9ee47ed3cfa75315ccb6ab04859432b049 | /speaker/bosch.py | 7e4434d9cae41582a2c6a91f4ebcdee17663524d | [
"MIT"
] | permissive | shannon-jia/speaker | ef83572bf64d766d8abd21c3ed6483cdbc6ff7e0 | 31c642f018725dd4878ef6a4e7a19b12b05774c8 | refs/heads/master | 2020-03-22T02:30:25.199727 | 2018-07-02T00:59:03 | 2018-07-02T00:59:03 | 139,374,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,271 | py | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import asyncio
import logging
log = logging.getLogger(__name__)
class TcpClientProtocol(asyncio.Protocol):
    """asyncio client protocol that mirrors connection state onto *master*."""

    def __init__(self, master):
        # master is expected to expose a writable `connected` attribute
        # (the Bosch class below owns the reconnect loop that reads it).
        self.master = master

    def connection_made(self, transport):
        self.transport = transport
        self.master.connected = True

    def data_received(self, data):
        log.debug('Data received: {!r}'.format(data.decode()))

    def connection_lost(self, exc):
        # Flag the owner as disconnected so its reconnect loop kicks in.
        log.error('The server closed the connection')
        self.master.connected = None
class Bosch(object):
    """Maintains a TCP connection to a Bosch OIP server and issues
    login / keep-alive / broadcast-call commands.

    The TYPE_OIP_* constants are pre-built binary protocol frames: the login
    frame embeds the literal bytes "admin"/"admin", the start-call frame
    embeds "ALL" and "xiaofang", and the keep-alive frame is a fixed header.
    """

    TYPE_OIP_Login = b'\x02\x70\x44\x00\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x61\x64\x6d\x69\x6e\x05\x00\x00\x00\x61\x64\x6d\x69\x6e'
    TYPE_OIP_StartCall = b'\x03\x70\x44\x00\x39\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x50\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x41\x4c\x4c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x78\x69\x61\x6f\x66\x61\x6e\x67'
    TYPE_OIP_KeepAlive = b'\x27\x70\x44\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    def __init__(self, loop, host, port,
                 user='admin', passwd='admin'):
        self.loop = loop
        self.host = host
        self.port = port
        # NOTE(review): user/passwd are stored but never used; the login frame
        # above has "admin"/"admin" baked in — confirm this is intended.
        self.user = user
        self.passwd = passwd
        self.connected = None
        self.loop.create_task(self._do_connect())
        self.transport = None
        # First heartbeat fires 6 s after startup; keepAlive reschedules itself.
        self.loop.call_later(6, self.keepAlive)

    async def _do_connect(self):
        # Reconnect loop: poll every 5 s and (re)establish the connection
        # whenever the protocol has flagged us as disconnected.
        while True:
            await asyncio.sleep(5)
            if self.connected:
                continue
            try:
                xt, _ = await self.loop.create_connection(
                    lambda: TcpClientProtocol(self),
                    self.host,
                    self.port)
                log.info('Connection create on {}'.format(xt))
                self.transport = xt
                self.login()
            except OSError:
                log.error('Server not up retrying in 5 seconds...')
            except Exception as e:
                log.error('Error when connect to server: {}'.format(e))

    def call(self, cmd):
        # NOTE(review): after a disconnect self.transport still references the
        # closed transport, so writes go to a dead transport until the
        # reconnect loop replaces it — consider also checking self.connected.
        if self.transport:
            self.transport.write(cmd)
            log.debug('send cmd to server: {}'.format(cmd))
        else:
            log.error('Invalid server transport.')

    def login(self):
        log.info('send cmd to server: [login]')
        self.call(self.TYPE_OIP_Login)

    def keepAlive(self):
        # Heartbeat: send the keep-alive frame, then reschedule in 5 s.
        log.info('send cmd to server: [keepAlive]')
        self.call(self.TYPE_OIP_KeepAlive)
        self.loop.call_later(5, self.keepAlive)

    def startCall(self):
        log.info('send cmd to server: [startCall]')
        self.call(self.TYPE_OIP_StartCall)
class EchoServerClientProtocol(asyncio.Protocol):
    """Trivial echo server used to exercise the Bosch client locally."""

    def connection_made(self, transport):
        peer = transport.get_extra_info('peername')
        print('======== Server =========: Connection from {}'.format(peer))
        self.transport = transport

    def data_received(self, data):
        text = data.decode()
        print('======== Server =========: Data received: {!r}'.format(text))
        print('======== Server =========: Send: {!r}'.format(text))
        # Echo the raw bytes straight back to the sender.
        self.transport.write(data)
if __name__ == '__main__':
log = logging.getLogger("")
formatter = logging.Formatter("%(asctime)s %(levelname)s " +
"[%(module)s:%(lineno)d] %(message)s")
# log the things
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
log.addHandler(ch)
loop = asyncio.get_event_loop()
bosch = Bosch(loop,
'127.0.0.1',
8888)
coro = loop.create_server(EchoServerClientProtocol, '127.0.0.1', 8888)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| [
"lishengchen@mingvale.com"
] | lishengchen@mingvale.com |
a338ca98eb0376c40f734caf4fa5facdaac9648d | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/oreilly/Mining.the.Social.Web/python_code/mailboxes__jsonify_mbox.py | 2680ac963760cd12584897c178e5e3f7916a77b7 | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | # -*- coding: utf-8 -*-
import sys
import mailbox
import email
import quopri
import json
from BeautifulSoup import BeautifulSoup
MBOX = sys.argv[1]
OUT_FILE = sys.argv[2]
def cleanContent(msg):
    """Decode a quoted-printable message body and strip any HTML markup."""
    # Decode message from "quoted printable" format
    msg = quopri.decodestring(msg)
    # Strip out HTML tags, if any are present
    soup = BeautifulSoup(msg)
    return ''.join(soup.findAll(text=True))
def jsonifyMessage(msg):
    """Convert an email.Message into a JSON-serializable dict (Python 2).

    Headers become top-level keys; each non-multipart MIME part is appended
    to 'parts' with its content type and cleaned text content.
    """
    json_msg = {'parts': []}
    for (k, v) in msg.items():
        json_msg[k] = v.decode('utf-8', 'ignore')
    # The To, CC, and Bcc fields, if present, could have multiple items
    # Note that not all of these fields are necessarily defined
    for k in ['To', 'Cc', 'Bcc']:
        if not json_msg.get(k):
            continue
        # Strip all whitespace, then split the recipient list on commas.
        json_msg[k] = json_msg[k].replace('\n', '').replace('\t', '').replace('\r'
            , '').replace(' ', '').decode('utf-8', 'ignore').split(',')
    try:
        for part in msg.walk():
            json_part = {}
            if part.get_content_maintype() == 'multipart':
                # Containers carry no payload of their own; only leaves do.
                continue
            json_part['contentType'] = part.get_content_type()
            content = part.get_payload(decode=False).decode('utf-8', 'ignore')
            json_part['content'] = cleanContent(content)
            json_msg['parts'].append(json_part)
    except Exception, e:
        sys.stderr.write('Skipping message - error encountered (%s)\n' % (str(e), ))
    finally:
        # NOTE(review): returning from `finally` swallows any exception raised
        # above; the dict is returned in whatever partial state it reached.
        return json_msg
# There's a lot of data to process, so use a generator to do it. See http://wiki.python.org/moin/Generators
# Using a generator requires a trivial custom encoder be passed to json for serialization of objects
class Encoder(json.JSONEncoder):
    """JSON encoder that serializes otherwise-unsupported iterables
    (e.g. generators) by materializing them as lists."""

    def default(self, o):
        # Fall back to list() for any object json cannot encode natively.
        return list(o)
# The generator itself...
def gen_json_msgs(mb):
    """Yield each message of UnixMailbox *mb* as a JSON-ready dict."""
    while 1:
        msg = mb.next()
        if msg is None:
            # The old mailbox API signals exhaustion with None, not StopIteration.
            break
        yield jsonifyMessage(msg)
# Parse the mbox and stream every message straight to the output file as JSON;
# the custom Encoder lets json.dump consume the generator lazily.
mbox = mailbox.UnixMailbox(open(MBOX, 'rb'), email.message_from_file)
json.dump(gen_json_msgs(mbox),open(OUT_FILE, 'wb'), indent=4, cls=Encoder)
| [
"xenron@outlook.com"
] | xenron@outlook.com |
8bb7448cd9061324f96d92191702074a4a55d7e4 | 4b0467a0e75e632a56af2a2ebc0abe257fd88544 | /fasttext_mlp/main.py | 57d72d023b737ef6904698db7b44ab371739eb8f | [] | no_license | sobamchan/simple_roc_cloze | 89f903d0fb100e4ee979c543f586a3223b3eac27 | ed24eeb615d8348acf7ad5c0c112a94c011600bf | refs/heads/master | 2020-03-27T22:15:54.703835 | 2018-09-13T13:01:56 | 2018-09-13T13:01:56 | 147,217,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | import os
import argparse
from pathlib import Path
import torch
from fasttext_mlp.trainer import Trainer
from logger import Logger
def getargs():
    """Parse command-line options for the fastText-MLP ROC-cloze experiment."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--odir', type=str)
    parser.add_argument('--gpu-id', default=2)
    parser.add_argument('--no-cuda', action='store_false')
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument(
        '--ddir', type=str,
        default='../DATA/ROC/cloze_test_val__spring2016_cloze_test_ALL_val')
    parser.add_argument(
        '--test-path', type=str,
        default='../DATA/ROC/test_set_spring_2016.csv')
    parser.add_argument('--ftpath', type=str, default='../DATA/wiki.en.bin')
    parser.add_argument('--bsz', type=int, default=32)
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--optim-type', type=str, default='sgd')
    parser.add_argument('--nlayers', type=int, default=3)
    parser.add_argument('--nemb', type=int, default=300)
    parser.add_argument('--nhidden', type=int, default=500)
    return parser.parse_args()
def main(args, logger):
    """Train for args.epochs epochs; write a submission whenever validation
    accuracy improves, otherwise multiplicatively decay the learning rate."""
    args.odir = Path(args.odir)
    t = Trainer(
        args.ddir,
        args.bsz,
        args.ftpath,
        args.nlayers,
        args.nemb,
        args.nhidden,
        args.lr,
        args.optim_type,
        args.use_cuda
    )
    best_acc = -1
    lr = args.lr
    for iepc in range(1, args.epochs + 1):
        logger.log('%dth epoch' % iepc)
        tr_loss = t.train_one_epoch(iepc)
        val_acc, val_loss = t.evaluate()
        if best_acc < val_acc:
            best_acc = val_acc
            logger.log('Best accuracy achived: %f!!!' % val_acc)
            t.make_submission(args.test_path, args.odir)
            logger.log('Making submission to %s' % args.odir)
        else:
            # No improvement: decay lr by 0.8.
            # NOTE(review): lr is multiplied INSIDE the param-group loop, so
            # with more than one param group each successive group receives a
            # smaller lr (0.8^k) — confirm this is intended.
            for pg in t.optimizer.param_groups:
                lr *= 0.8
                pg['lr'] = lr
            logger.log('Decrease lr to %f' % lr)
        logger.dump({
            'epoch': iepc,
            'tr_loss': tr_loss,
            'val_loss': val_loss,
            'val_acc': val_acc,
        })
if __name__ == '__main__':
    args = getargs()
    logger = Logger(args.odir)
    # GPU usage
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
    logger.log('using GPU id: %s' % os.environ['CUDA_VISIBLE_DEVICES'])
    # NOTE(review): --no-cuda uses store_false, so args.no_cuda defaults to
    # True and CUDA is enabled only when --no-cuda IS passed — this looks
    # inverted; confirm the intended flag semantics.
    if not args.no_cuda and torch.cuda.is_available():
        args.use_cuda = True
    else:
        args.use_cuda = False
    logger.log(str(args))
    main(args, logger)
| [
"oh.sore.sore.soutarou@gmail.com"
] | oh.sore.sore.soutarou@gmail.com |
d86338a9bec602205cf0f357d8f1b2622b5cb005 | 5051f0d301e1e6ab8cb434c50c95697036d4d7ae | /02_colors/create_image.py | 6824ae559bbf17e842fabedb907ad1ca0f97c841 | [] | no_license | Python-Repository-Hub/image_processing_with_python | 41590e36d466f2e7017a832419ed1827f305c23f | d0b6a32c893e0c0ed9c9638bbf1632d950adabf8 | refs/heads/main | 2023-08-31T10:59:11.551382 | 2021-10-20T14:07:02 | 2021-10-20T14:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # create_image.py
from PIL import Image
from PIL import ImageColor
def create_image(path, size):
    """Save an RGBA image of *size* filled with alternating 5-pixel
    red/green runs (the run counter carries across rows)."""
    width, height = size
    image = Image.new("RGBA", size)
    red = ImageColor.getcolor("red", "RGBA")
    green = ImageColor.getcolor("green", "RGBA")
    color, run = red, 0
    for y in range(height):
        for x in range(width):
            if run == 5:
                # Toggle between the two colors after every 5 painted pixels.
                color = green if color == red else red
                run = 0
            image.putpixel((x, y), color)
            run += 1
    image.save(path)
if __name__ == "__main__":
    # Demo: render a 150x150 test pattern of 5-pixel color runs.
    create_image("lines.png", (150, 150))
| [
"mike@pythonlibrary.org"
] | mike@pythonlibrary.org |
75d2fa1bf0944c5ed7d6312b208a487ecb4b9b66 | ac617cb51ae396d932ee843af58becad8cf64e53 | /gammapy/utils/distributions/tests/test_general_random.py | f49e4b60262ea41fa93a4b91d61da5d931e5c9a6 | [
"BSD-3-Clause"
] | permissive | dlennarz/gammapy | aef7f14bb84385d017c180b425b5bc3800beb343 | e7e386047f7dfd7d1e50edd5b2615f484b98c664 | refs/heads/master | 2020-04-05T17:58:22.383376 | 2019-05-28T13:28:57 | 2019-05-28T13:28:57 | 40,043,940 | 0 | 0 | null | 2019-05-28T12:39:39 | 2015-08-01T10:59:02 | Python | UTF-8 | Python | false | false | 354 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from ..general_random import GeneralRandom
def f(x):
    """Unnormalized quadratic density f(x) = x**2 used as the sampling target."""
    return x * x
def test_general_random():
    # Draw from the x**2 density on [0, 3] with a fixed seed; the sample mean
    # is pinned as a regression value (the analytic mean of this density on
    # [0, 3] is 2.25, close to the pinned 2.2293).
    general_random = GeneralRandom(f, 0, 3)
    vals = general_random.draw(random_state=42)
    assert_allclose(vals.mean(), 2.229301, atol=1e-4)
| [
"Deil.Christoph@gmail.com"
] | Deil.Christoph@gmail.com |
ef6f0cf116e3e5302a0e5eaa1724bd496d65f9e4 | 0c6baaf9324e5ff2af96e23c44a62e5f6962e502 | /module11_VCF.py | 831f4c9939b1992537447faec6f96db8292bad38 | [] | no_license | friedpine/modules | 8d77c7f88bf09884adf57115d60111afdae6d049 | 49af041990e58fa985dacbaf7c8edaa15fbd74a1 | refs/heads/master | 2016-09-06T17:17:18.556680 | 2015-05-25T07:28:52 | 2015-05-25T07:28:52 | 20,153,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,453 | py | import re, sys, os, copy
import subprocess
import time
import cPickle as pickle
import infra01_pos2info as in1
import MySQLdb as mb
import d00_sample as d00
def read_VCF_file(path,DB_NAME,tablename,limit,counts,samples):
    """Load a multi-sample VCF into a freshly-created MySQL table (Python 2).

    One column group (genotype, allele counts, DP, GQ) is created per sample;
    `counts` is the number of per-sample FORMAT fields expected per row and
    `limit` caps the length of Ref/Alt alleles.
    """
    conn=mb.connect(host="localhost",user="root",passwd="123456",db=DB_NAME)
    cursor = conn.cursor()
    sample_infos = ''
    for sample in samples:
        sample_info = " %s_G varchar(5) DEFAULT NULL,%s_0 int(11) DEFAULT NULL,%s_1 int(11) DEFAULT NULL,%s_2 int(11) DEFAULT NULL,%s_DP int(11) DEFAULT NULL,%s_GQ int(11) DEFAULT NULL," %(sample,sample,sample,sample,sample,sample)
        sample_infos += sample_info
    sql = """CREATE TABLE %s (
    `chr` varchar(20) NOT NULL DEFAULT '',
    `pos` int(11) NOT NULL DEFAULT '0',
    `Ref` varchar(30) DEFAULT NULL,
    `Alt` varchar(30) NOT NULL DEFAULT '',
    `Qual` float DEFAULT NULL,
    `DP` int(11) DEFAULT NULL,
    `FQ` float DEFAULT NULL,
    `AF1` float DEFAULT NULL,
    `AC1` float DEFAULT NULL,
    %s
    PRIMARY KEY (`chr`,`pos`,`Alt`)
    ) ENGINE=InnoDB DEFAULT CHARSET=latin1""" %(tablename,sample_infos)
    try:
        cursor.execute(sql)
    except:
        # Table already created on a previous run.
        print "EXISTS"
    file = open(path)
    values = []
    for line in file:
        if re.search('#',line):
            # Skip VCF header/meta lines.
            continue
        t = re.split('\s*',line)
        info = {}
        # Parse the semicolon-separated INFO column into key/value pairs.
        for i in re.split(';',t[7]):
            a = re.split('=',i)
            if len(a)>1:
                info[a[0]] = a[1]
        if len(t[3])>limit:
            # NOTE(review): the truncation is dead code — `continue` drops the
            # record immediately afterwards (same for the Alt branch below).
            t[3]=t[3][0:20]
            continue
        if len(t[4])>limit:
            t[4]=t[4][0:limit]
            continue
        value = (t[0],t[1],t[3],t[4],t[5],info['DP'],info['FQ'],info['AF1'],info['AC1'])
        for i in range(counts):
            value += tuple(re.split(':|,',t[9+i]))
        if len(value)!=9+counts*6:
            # Malformed row (wrong number of per-sample fields): skip it.
            a = 10
        else:
            values.append(value)
    cmd = "insert into "+tablename+" values(%s"+",%s"*(8+counts*6)+")"
    cursor.executemany(cmd,values);
    conn.commit()
    cursor.close()
    conn.close()
def read_VCF_file_single(cursor,conn,DB_NAME,tablename,samples,type):
    """Load one single-sample VCF per sample into a shared table (Python 2).

    Each sample contributes a depth column and an alt-allele-fraction column;
    rows are upserted so repeated runs fill in additional samples.
    """
    limit = 30
    sample_infos = ''
    for sample in samples:
        sample_info = " %s_DP varchar(5) DEFAULT '0',%s_alt float DEFAULT '0'," %(sample,sample)
        sample_infos += sample_info
    sql = """CREATE TABLE %s (
    `chr` varchar(20) NOT NULL DEFAULT '',
    `pos` int(11) NOT NULL DEFAULT '0',
    `Ref` varchar(30) DEFAULT NULL,
    `Alt` varchar(30) NOT NULL DEFAULT '',
    %s
    PRIMARY KEY (`chr`,`pos`,`Alt`)
    ) ENGINE=InnoDB DEFAULT CHARSET=latin1""" %(tablename,sample_infos)
    print sql
    try:
        cursor.execute(sql)
    except:
        print "EXISTS"
    for sample in samples:
        # Resolve this sample's VCF path from the samples table.
        path = d00.get_sample_file(cursor,sample,type)
        file = open(path)
        values = []
        for line in file:
            if re.search('#',line):
                continue
            t = re.split('\s*',line)
            info = {}
            for i in re.split(';',t[7]):
                a = re.split('=',i)
                if len(a)>1:
                    info[a[0]] = a[1]
            if 'DP4' not in info:
                continue
            # DP4 = ref-fwd, ref-rev, alt-fwd, alt-rev read counts.
            DP4 = re.split(',',info['DP4'])
            if len(t[3])>limit:
                # NOTE(review): truncation is dead code — `continue` drops the row.
                t[3]=t[3][0:limit]
                continue
            if len(t[4])>limit:
                t[4]=t[4][0:limit]
                continue
            value = (t[0],t[1],t[3],t[4],info['DP'],float(int(DP4[2])+int(DP4[3]))/int(info['DP']))
            values.append(value)
        # Upsert so a re-run for another sample updates only its own columns.
        cmd = "insert into %s (chr,pos,Ref,Alt,%s,%s)values(%%s,%%s,%%s,%%s,%%s,%%s) on duplicate key update %s=values(%s),%s=values(%s)" %(tablename,sample+'_DP',sample+'_alt',sample+'_DP',sample+'_DP',sample+'_alt',sample+'_alt')
        print cmd,values[0]
        cursor.executemany(cmd,values)
    conn.commit()
    cursor.close()
    conn.close()
class SNP(dict):
    """Dict-based container of per-chromosome SNP records (Python 2).

    Expected layout (populated elsewhere): self['chrs'] lists chromosomes,
    self[chr][pos] holds per-position fields ('QUAL', 'GT', 'GQ', 'ref',
    'alt'). Results of filtering are stored under a caller-chosen key *rec*.
    """

    def __init__(self):
        print "SNP class welcomes you!"
    # def read_VCF_file(self,path,sample_names):
    #     self['samples'] = sample_names
    #     file = open(path)
    #     values = []
    #     for line in file:
    #         if re.search('#',line):
    #             continue
    #         t = re.split('\s*',line)
    #         info = re.split(t[7]

    def find_good_quality_SNP_pos(self,group,names,goodsize,QUAL_off,GQ_off,rec):
        """Store under self[rec] the positions where the two sample groups have
        confidently-called, differing genotypes.

        group maps sample index -> group id; goodsize[g] is the minimum number
        of high-GQ samples that must agree for group g; QUAL_off/GQ_off are
        quality cut-offs.
        """
        self['groupnames'] = names
        self[rec] = {}
        # Build, per group, the list of sample indices belonging to it.
        indexs = []
        for i in range(len(names)):
            temp = []
            for j in range(len(group)):
                if group[j] == i:
                    temp.append(j)
            indexs.append(temp)
        for chr in self['chrs']:
            for pos in self[chr]:
                if self[chr][pos]['QUAL'] < QUAL_off:
                    continue
                self[chr][pos]['group_GT'] = ['NA','NA']
                for groupid,i in enumerate(indexs):
                    # Collect genotypes of samples passing the GQ cut-off.
                    types = []
                    number = 0
                    for j in i:
                        if self[chr][pos]['GQ'][j] >= GQ_off:
                            types.append(self[chr][pos]['GT'][j])
                    counts = dict([(i, types.count(i)) for i in types])
                    # The group genotype is the one reaching the quorum.
                    GroupType = 'NA'
                    for gt in counts:
                        if counts[gt] >= goodsize[groupid]:
                            GroupType = gt
                    self[chr][pos]['group_GT'][groupid] = GroupType
                if 'NA' not in self[chr][pos]['group_GT']:
                    # Keep the position only when the two groups disagree.
                    counts = dict([(i, types.count(i)) for i in self[chr][pos]['group_GT']])
                    if len(counts) == 2:
                        if chr not in self[rec]:
                            self[rec][chr] = {}
                        self[rec][chr][pos] = {}
                        self[rec][chr][pos]['GT'] = self[chr][pos]['group_GT']
                        self[rec][chr][pos]['ref'] = self[chr][pos]['ref']
                        self[rec][chr][pos]['alt'] = self[chr][pos]['alt']

    def get_pos_infos(self,rec,db1,db2):
        # Annotate the stored positions in place via the infra01 helper.
        poses = copy.deepcopy(self[rec])
        in1.get_infos(db1,db2,poses)
        self[rec] = poses

    def select_target_genes(self,rec,type,genetypes,file):
        """Write positions of annotation category *type* whose group genotype
        is in *genetypes* to *file* (one whitespace-separated line each)."""
        outfile = open(file,'w')
        for chr in self[rec]:
            for pos in self[rec][chr]:
                temp = self[rec][chr][pos]
                if self[rec][chr][pos][type]['raw'] == []:
                    continue
                if self[rec][chr][pos]['GT'] not in genetypes:
                    continue
                print >>outfile,chr,pos,temp['ref'],temp['alt'],temp[type]['genes'][0],temp[type]['transc'][0]
        outfile.close()
| [
"friedpine@gmail.com"
] | friedpine@gmail.com |
e7c6599fa4227881a3af7c41cf4947529bf81dab | 06eee979fbd6ed21a83e66ba3a81308bc54b946e | /scripts/decompiler_scripts/util.py | 3272bc18a593799a6549b16b8e89401073c24562 | [] | no_license | j4M0Fj1MMy/ghcc | b3e5e7dedc26d844baf74ae293d27993a5ef75bd | e5ed776bd444cc1ba76daa1baba1856b48814f81 | refs/heads/master | 2023-08-04T08:04:09.353865 | 2021-09-24T14:14:30 | 2021-09-24T14:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,770 | py | from collections import defaultdict
import ida_hexrays
import ida_lines
import ida_pro
import json
import re
UNDEF_ADDR = 0xFFFFFFFFFFFFFFFF
hexrays_vars = re.compile("^(v|a)[0-9]+$")
def get_expr_name(expr):
    """Render *expr* as plain user-facing text, with IDA color tags stripped."""
    rendered = expr.print1(None)
    return ida_pro.str2user(ida_lines.tag_remove(rendered))
class CFuncGraph:
    """Adjacency-list graph over the citem_t nodes of a Hex-Rays ctree.

    Node numbers index into self.items; self.succs/self.preds hold child and
    parent node numbers, and self.reverse maps a citem_t back to its number.
    """

    def __init__(self, highlight):
        self.items = [] # list of citem_t
        self.reverse = dict() # citem_t -> node #
        self.succs = [] # list of lists of next nodes
        self.preds = [] # list of lists of previous nodes
        self.highlight = highlight

    def nsucc(self, n):
        return len(self.succs[n]) if self.size() else 0

    def npred(self, n):
        return len(self.preds[n]) if self.size() else 0

    def succ(self, n, i):
        return self.succs[n][i]

    def pred(self, n, i):
        return self.preds[n][i]

    def size(self):
        return len(self.preds)

    def add_node(self):
        """Append an empty node and return its number."""
        n = self.size()

        def resize(array, new_size):
            # Grow (with empty adjacency lists) or shrink to new_size.
            if new_size > len(array):
                while len(array) < new_size:
                    array.append([])
            else:
                array = array[:new_size]
            return array

        self.preds = resize(self.preds, n+1)
        self.succs = resize(self.succs, n+1)
        return n

    def add_edge(self, x, y):
        # Directed edge x -> y.
        self.preds[y].append(x)
        self.succs[x].append(y)

    def get_pred_ea(self, n):
        """Walk up single-parent chains until a node with a real address is
        found; UNDEF_ADDR when the node has zero or multiple parents."""
        if self.npred(n) == 1:
            pred = self.pred(n, 0)
            pred_item = self.items[pred]
            if pred_item.ea == UNDEF_ADDR:
                return self.get_pred_ea(pred)
            return pred_item.ea
        return UNDEF_ADDR

    def get_node_label(self, n):
        """Human-readable label for node n: ctype name, op-specific detail,
        effective address and (for expressions) the C type."""
        item = self.items[n]
        op = item.op
        insn = item.cinsn
        expr = item.cexpr
        parts = [ida_hexrays.get_ctype_name(op)]
        if op == ida_hexrays.cot_ptr:
            parts.append(".%d" % expr.ptrsize)
        elif op == ida_hexrays.cot_memptr:
            parts.append(".%d (m=%d)" % (expr.ptrsize, expr.m))
        elif op == ida_hexrays.cot_memref:
            parts.append(" (m=%d)" % (expr.m,))
        elif op in [
                ida_hexrays.cot_obj,
                ida_hexrays.cot_var]:
            name = get_expr_name(expr)
            parts.append(".%d %s" % (expr.refwidth, name))
        elif op in [
                ida_hexrays.cot_num,
                ida_hexrays.cot_helper,
                ida_hexrays.cot_str]:
            name = get_expr_name(expr)
            parts.append(" %s" % (name,))
        elif op == ida_hexrays.cit_goto:
            parts.append(" LABEL_%d" % insn.cgoto.label_num)
        elif op == ida_hexrays.cit_asm:
            parts.append("<asm statements; unsupported ATM>")
        # parts.append(" %a.%d" % ())
        parts.append(", ")
        parts.append("ea: %08X" % item.ea)
        if item.is_expr() and not expr is None and not expr.type.empty():
            parts.append(", ")
            tstr = expr.type._print()
            parts.append(tstr if tstr else "?")
        return "".join(parts)

    # Puts the tree in a format suitable for JSON
    def json_tree(self, n):
        """Recursively serialize the subtree rooted at node n to a dict,
        keeping Hex-Rays' x/y/z operand slots separate from other children."""
        # Each node has a unique ID
        node_info = { "node_id" : n }
        item = self.items[n]
        # This is the type of ctree node
        node_info["node_type"] = ida_hexrays.get_ctype_name(item.op)
        # This is the type of the data (in C-land)
        if item.is_expr() and not item.cexpr.type.empty():
            node_info["type"] = item.cexpr.type._print()
        node_info["address"] = "%08X" % item.ea
        if item.ea == UNDEF_ADDR:
            # Synthetic node: report the nearest ancestor's address instead.
            node_info["parent_address"] = "%08X" % self.get_pred_ea(n)
        # Specific info for different node types
        if item.op == ida_hexrays.cot_ptr:
            node_info["pointer_size"] = item.cexpr.ptrsize
        elif item.op == ida_hexrays.cot_memptr:
            node_info.update({
                "pointer_size": item.cexpr.ptrsize,
                "m": item.cexpr.m
            })
        elif item.op == ida_hexrays.cot_memref:
            node_info["m"] = item.cexpr.m
        elif item.op == ida_hexrays.cot_obj:
            node_info.update({
                "name": get_expr_name(item.cexpr),
                "ref_width": item.cexpr.refwidth
            })
        elif item.op == ida_hexrays.cot_var:
            # Variable names are encoded as "@@id@@old@@new" by the pipeline.
            _, var_id, old_name, new_name = get_expr_name(item.cexpr).split("@@")
            node_info.update({
                "var_id": var_id,
                "old_name": old_name,
                "new_name": new_name,
                "ref_width": item.cexpr.refwidth
            })
        elif item.op in [ida_hexrays.cot_num,
                ida_hexrays.cot_str,
                ida_hexrays.cot_helper]:
            node_info["name"] = get_expr_name(item.cexpr)
        # Get info for children of this node
        successors = []
        x_successor = None
        y_successor = None
        z_successor = None
        for i in xrange(self.nsucc(n)):
            successors.append(self.succ(n, i))
        successor_trees = []
        if item.is_expr():
            # Match each of the x/y/z operand slots to its graph successor.
            if item.x:
                for s in successors:
                    if item.x == self.items[s]:
                        successors.remove(s)
                        x_successor = self.json_tree(s)
                        break
            if item.y:
                for s in successors:
                    if item.y == self.items[s]:
                        successors.remove(s)
                        y_successor = self.json_tree(s)
                        break
            if item.z:
                for s in successors:
                    if item.z == self.items[s]:
                        successors.remove(s)
                        z_successor = self.json_tree(s)
                        break
        if successors:
            # Remaining successors (statement children etc.).
            for succ in successors:
                successor_trees.append(self.json_tree(succ))
        if successor_trees != []:
            node_info["children"] = successor_trees
        if x_successor:
            node_info["x"] = x_successor
        if y_successor:
            node_info["y"] = y_successor
        if z_successor:
            node_info["z"] = z_successor
        return node_info

    def print_tree(self):
        # Serialize from the root (node 0) and print as one JSON line.
        tree = json.dumps(self.json_tree(0))
        print(tree)

    def dump(self):
        """Debug dump of items and both adjacency lists."""
        print("%d items:" % len(self.items))
        for idx, item in enumerate(self.items):
            print("\t%d: %s" % (idx, ida_hexrays.get_ctype_name(item.op)))
            # print("\t%d: %s" % (idx, self.get_node_label(idx)))
        print("succs:")
        for parent, s in enumerate(self.succs):
            print("\t%d: %s" % (parent, s))
        print("preds:")
        for child, p in enumerate(self.preds):
            print("\t%d: %s" % (child, p))
class GraphBuilder(ida_hexrays.ctree_parentee_t):
    """ctree visitor that records every item and each parent->child edge
    into a CFuncGraph as Hex-Rays walks the tree."""

    def __init__(self, cg):
        ida_hexrays.ctree_parentee_t.__init__(self)
        self.cg = cg

    def add_node(self, i):
        n = self.cg.add_node()
        if n <= len(self.cg.items):
            self.cg.items.append(i)
        self.cg.items[n] = i
        self.cg.reverse[i] = n
        return n

    def process(self, i):
        n = self.add_node(i)
        if n < 0:
            return n
        if len(self.parents) > 1:
            # Look up the immediate parent's node number by object identity
            # (obj_id), since the parent list holds separate wrapper objects.
            lp = self.parents.back().obj_id
            for k, v in self.cg.reverse.items():
                if k.obj_id == lp:
                    p = v
                    break
            self.cg.add_edge(p, n)
        # Returning 0 tells Hex-Rays to continue the traversal.
        return 0

    def visit_insn(self, i):
        return self.process(i)

    def visit_expr(self, e):
        return self.process(e)
| [
"huzecong@gmail.com"
] | huzecong@gmail.com |
62ea943585b3d94a4bea611c7b24d5c490e7e4ae | ed1cc52f25caa9b57679d5b74a97a99f40ebbb05 | /saveData.py | 0f437123682ef6a17ed6ff7463f265f5c604d362 | [] | no_license | west789/twitterCrawl | fb26400905ec661be61a8b8dbf61f156f0705e25 | 80cb61cff59575844cc6d60e2c9dee712481f082 | refs/heads/master | 2022-12-14T16:58:05.379439 | 2018-07-26T08:29:57 | 2018-07-26T08:29:57 | 139,787,645 | 0 | 1 | null | 2022-01-21T19:24:23 | 2018-07-05T02:57:14 | Python | UTF-8 | Python | false | false | 6,003 | py | import pymysql
from loggingModule import logger
class MysqlDB(object):
    """Owns a pymysql connection/cursor pair to the local `twittershowtest`
    database.

    NOTE(review): credentials are hard-coded; move them to configuration.
    """

    def __init__(self):
        # Pre-initialize so close() is safe even when connecting fails below.
        self.conn = None
        self.cursor = None
        try:
            self.conn = pymysql.connect(
                'localhost',
                'root',
                '123',
                'twittershowtest',
                charset='utf8mb4')
            self.cursor = self.conn.cursor()
        except Exception as e:
            logger.info('连接数据库失败:%s' % str(e))

    def close(self):
        """Release the cursor and connection if they were created.

        Bug fix: previously raised AttributeError when __init__ had failed to
        connect (self.conn / self.cursor were never assigned).
        """
        if self.cursor is not None:
            self.cursor.close()
        if self.conn is not None:
            self.conn.close()
class TwitterPip(MysqlDB):
    """MySQL persistence helpers for crawled Twitter accounts and tweets.

    Insert/update methods roll back and return "error" (or the unchanged
    flag) on failure instead of raising; read methods return empty defaults.
    """

    def insert_userInfo(self, itemDict):
        # 13 account columns; itemDict keys must match the names below.
        sql = """
        INSERT INTO account (accountName, twitterId, screenName, location, description,
        url, statusesCount, friendsCount, followersCount, favoritesCount,
        accountTime, profileImage, bannerUrl) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        try:
            self.cursor.execute(
                sql, (itemDict["accountName"], itemDict["twitterId"],
                      itemDict["screenName"], itemDict["location"],
                      itemDict["description"], itemDict["url"],
                      itemDict["statusesCount"], itemDict["friendsCount"],
                      itemDict["followersCount"], itemDict["favoritesCount"],
                      itemDict["accountTime"], itemDict["profileImage"],
                      itemDict["bannerUrl"]))
            self.conn.commit()
            # print("插入 %s 账户信息成功" % itemDict["screenName"])
        except Exception as e:
            self.conn.rollback()
            logger.info("插入 %s 账户信息失败 %s" % (itemDict["screenName"], str(e)))
            return "error"

    def insert_tweetInfo(self, itemDict, flag):
        """Insert one tweet; returns flag+1 on success, flag unchanged on failure."""
        sql = """
        INSERT INTO tweets (accountId, tweetsText, tweetsUrl, videoUrl, imageUrl, retweetCount,
        tweetFavCount, tweetTime, twitterId) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        try:
            self.cursor.execute(
                sql, (itemDict["accountId"], itemDict["tweetsText"],
                      itemDict["tweetsUrl"], itemDict["videoUrl"],
                      itemDict["imageUrl"], itemDict["retweetCount"],
                      itemDict["favoriteCount"], itemDict["tweetTime"],
                      itemDict["twitterId"]))
            self.conn.commit()
            # print("插入推文信息成功")
            flag += 1
            return flag
        except Exception as e:
            self.conn.rollback()
            logger.info("插入 %s 推文信息失败 %s" % (itemDict["twitterId"], str(e)))
            return flag

    def update_userInfo(self, itemDict, screenName):
        # NOTE(review): the screenName parameter is unused — the WHERE clause
        # uses itemDict["screenName"] instead; confirm that is intended.
        sql = """
        update account set accountName=%s, screenName=%s,
        twitterId=%s, location=%s,
        description=%s, url=%s,
        statusesCount=%s, friendsCount=%s,
        followersCount=%s, favoritesCount=%s,
        accountTime=%s, profileImage=%s,
        bannerUrl=%s where screenName=%s
        """
        try:
            self.cursor.execute(
                sql, (itemDict["accountName"], itemDict["screenName"],
                      itemDict["twitterId"], itemDict["location"],
                      itemDict["description"], itemDict["url"],
                      itemDict["statusesCount"], itemDict["friendsCount"],
                      itemDict["followersCount"], itemDict["favoritesCount"],
                      itemDict["accountTime"], itemDict["profileImage"],
                      itemDict["bannerUrl"], itemDict["screenName"]))
            self.conn.commit()
            # print("更新 %s 账户信息成功" % itemDict["screenName"])
        except Exception as e:
            self.conn.rollback()
            logger.info("更新 %s 账户信息失败,%s" % (itemDict["screenName"], str(e)))
            return "error"

    # 获取twitterId的列表
    def get_twitterIdList(self):
        """Return all twitterIds, or [] on query failure."""
        sql = "select twitterId from account"
        # cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            self.cursor.execute(sql)
            idTuple = self.cursor.fetchall()
            idList = [item[0] for item in idTuple]
            return idList
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s:%s" % (str(e), sql))
            return []

    #获取screenName列表
    def get_screenName(self):
        """Return all screen names, or [] on query failure."""
        sql = "SELECT screenName FROM account"
        try:
            self.cursor.execute(sql)
            nameTuple = self.cursor.fetchall()
            nameList = [item[0] for item in nameTuple]
            return nameList
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s:%s" % (str(e), sql))
            return []

    # 获取accountId
    def get_accountId(self, twitterId):
        """Return the accountId for *twitterId*, or "" on failure.

        NOTE(review): the value is interpolated into the SQL string — prefer a
        parameterized query (execute(sql, (twitterId,))) to avoid injection.
        """
        sql = "select accountId from account where twitterId =%s" % twitterId
        try:
            self.cursor.execute(sql)
            accountId = self.cursor.fetchone()
            return accountId[0]
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s:%s" % (str(e), sql))
            return ""

    # 获取最近插入的Id
    def get_sinceId(self, accountId):
        """Return the most recently inserted twitterId for *accountId*, or None.

        NOTE(review): same string-interpolation concern as get_accountId.
        """
        sql = "SELECT tweets.twitterId from tweets where accountId=%s ORDER BY tweets.tweetsId desc LIMIT 1" % accountId
        try:
            self.cursor.execute(sql)
            sinceId = self.cursor.fetchone()
            if sinceId != None:
                return sinceId[0]
            else:
                return None
        except Exception as e:
            self.conn.rollback()
            logger.info("执行sql语句失败%s" % str(e))
            return None
| [
"738758058@qq.com"
] | 738758058@qq.com |
4dca708fc3cb0a96329444808619dac71dfb1f5c | f00c168128e47040486af546a0859811c638db3d | /dic32 | ff6395d525f6b1d36b6f9618a46a36b5f1355992 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | sbp/dic32 | 16e7741cbafd42283e866f788f9faa4e16b9f61e | 14745ecee813c2a590b430ed18c4d867848291d4 | refs/heads/master | 2016-09-05T22:45:47.393067 | 2015-02-14T15:59:55 | 2015-02-14T15:59:55 | 30,799,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,951 | #!/usr/bin/env python3
"""
dic32 - Data Integrity Checker
Stores the CRC32 of inodes in an Sqlite3 database
Written by Sean B. Palmer
"How many bits on your disk are corrupted and were propagated to your backups?
You have no way to know. We've had the solution for decades." (@garybernhardt)
NO WARRANTY, NO GUARANTEES
$ dic32 update ~/.dic32.sqlite3 ~/
"""
import os
import sqlite3
import struct
import sys
import time
import zlib
def error(message):
    """Report *message* on stderr and terminate with exit status 1."""
    print("Error:", message, file=sys.stderr)
    raise SystemExit(1)
class PersistentDictionary(object):
    """Minimal dict-like key/value store persisted in one SQLite table.

    Writes are buffered until commit(); __contains__ returns a truthy row
    tuple (or None) rather than a strict bool, matching `in` semantics.
    """

    def __init__(self, filename, *, journal_mode="DELETE"):
        self.connection = sqlite3.connect(filename)
        self.connection.execute("PRAGMA journal_mode = %s" % journal_mode)
        self.connection.execute(
            "CREATE TABLE IF NOT EXISTS dict (key BLOB PRIMARY KEY, value BLOB)")
        self.connection.commit()

    def select_one(self, query, arg=None):
        # First matching row, or None when the query yields nothing.
        return self.connection.execute(query, arg).fetchone()

    def commit(self):
        if self.connection is not None:
            self.connection.commit()

    def close(self):
        if self.connection is None:
            return
        self.connection.close()
        self.connection = None

    def __contains__(self, key):
        return self.select_one("SELECT 1 from dict where key = ?", (key,))

    def __getitem__(self, key):
        row = self.select_one("SELECT value FROM dict WHERE key = ?", (key,))
        if row is None:
            raise KeyError(key)
        return row[0]

    def __setitem__(self, key, value):
        self.connection.execute(
            "REPLACE INTO dict (key, value) VALUES (?, ?)", (key, value))

    def __delitem__(self, key):
        if key not in self:
            raise KeyError(key)
        self.connection.execute("DELETE FROM dict WHERE key = ?", (key,))

    def keys(self):
        # Keys in insertion (rowid) order.
        rows = self.connection.execute("SELECT key FROM dict ORDER BY rowid")
        return [row[0] for row in rows]
def walk(directory):
    """Yield the path of every regular file under *directory*, recursively."""
    for root, _dirs, filenames in os.walk(directory):
        for filename in filenames:
            yield os.path.join(root, filename)
def crc32(path):
    """Return the CRC32 of the file at *path*, streamed in 32 MB chunks."""
    checksum = 0
    with open(path, "rb") as stream:
        # iter() with a sentinel reads until EOF yields an empty bytes object.
        for chunk in iter(lambda: stream.read(33554432), b""):
            checksum = zlib.crc32(chunk, checksum)
    return checksum
def pack(integer: int) -> bytes:
    """Encode *integer* as 4 big-endian bytes (struct format '>I').

    NOTE(review): values >= 2**32 raise struct.error — 64-bit inode numbers
    or post-2106 timestamps would break this; confirm acceptable for the
    target filesystems.
    """
    return struct.pack(">I", integer)
def dic32_path(db, path, update, force, cache, log):
    """Verify (and optionally record) one file's CRC32, keyed by inode.

    db maps pack(inode) -> pack(mtime) + pack(crc32). Pending writes go into
    *cache*; *log* tracks Mismatched paths and Removal candidates. Returns
    the file's size so the caller can report throughput.
    """
    stat = os.stat(path)
    inode = pack(stat.st_ino)
    modified = pack(int(os.path.getmtime(path)))
    if inode in db:
        # Seen this inode: it is no longer a removal candidate.
        log["R"].discard(inode)
        metadata = db[inode]
        if metadata.startswith(modified):
            # mtime unchanged, so the stored checksum must still match;
            # a mismatch indicates silent corruption.
            checksum = pack(crc32(path))
            if not metadata.endswith(checksum):
                if update and force:
                    # "force" overwrites even corrupted entries.
                    cache[inode] = modified + checksum
                log["M"].append(path)
        elif update:
            # mtime changed: treat as a legitimate edit and re-record.
            checksum = pack(crc32(path))
            cache[inode] = modified + checksum
    elif update:
        # New inode: record it.
        checksum = pack(crc32(path))
        cache[inode] = modified + checksum
    return stat.st_size
def dic32(filename, directory, *, update=False, force=False, verbose=False):
    """Scan *directory*, comparing/recording each file's CRC32 in the SQLite
    store at *filename*, then print a summary (M/U/R/X counts and paths)."""
    db = PersistentDictionary(filename)
    # M: mismatched paths, U: updated count, R: removal candidates
    # (every known inode until rediscovered), X: unreadable paths.
    log = {"M": [], "U": 0, "R": set(db.keys()), "X": []}
    cache = {}

    def sync(db, cache):
        # Flush buffered writes in one transaction.
        for key in cache:
            db[key] = cache[key]
        log["U"] += len(cache)
        db.commit()
        cache.clear()

    processed = 0
    total = 0
    status = "\rProcessed %s files, %s MB"
    started = time.time()
    for path in walk(directory):
        args = (db, path, update, force, cache, log)
        try: size = dic32_path(*args)
        except (FileNotFoundError, PermissionError):
            log["X"].append(path)
            continue
        processed += 1
        total += size
        if not (processed % 10):
            # Lightweight progress line, refreshed every 10 files.
            sys.stderr.write(status % (processed, total // 1000000))
        if update and (len(cache) > 8192):
            sync(db, cache)
    status += " in %s seconds" % round(time.time() - started, 2)
    sys.stderr.write(status % (processed, total // 1000000))
    if update and log["R"]:
        # Inodes never rediscovered during the walk are purged.
        for key in log["R"]:
            del db[key]
        db.commit()
    if update and cache:
        sync(db, cache)
    print("")
    results = [(len(log["M"]), "Mismatched")]
    if update:
        results.append((log["U"], "Updated"))
        results.append((len(log["R"]), "Removed"))
    if log["X"]:
        results.append((len(log["X"]), "Unreadable"))
    print(", ".join("%s %s" % pair for pair in results))
    for m in log["M"]:
        print("M", m)
    if verbose:
        for r in log["R"]:
            print("R", r)
        for x in log["X"]:
            print("X", x)
    db.close()
def main(argv=None):
    """CLI entry point: dic32 ( check | update | force ) FILENAME DIRECTORY."""
    argv = sys.argv if argv is None else argv
    if len(argv) != 4:
        error("Usage: dic32 ( check | update | force ) FILENAME DIRECTORY")
    action, filename, directory = argv[1], argv[2], argv[3]
    if action not in {"check", "update", "force"}:
        error("Action must be check, update, or force")
    verbose = "DIC32_VERBOSE" in os.environ
    if not os.path.isdir(directory):
        error("Not a directory: %s" % directory)
    if action == "check":
        # check requires an existing database; update/force create one.
        if not os.path.isfile(filename):
            error("Database does not exist: %s" % filename)
        dic32(filename, directory, update=False, verbose=verbose)
    elif action == "update":
        dic32(filename, directory, update=True, verbose=verbose)
    elif action == "force":
        dic32(filename, directory, update=True, force=True, verbose=verbose)
    else:
        error("Please report this bug")
if __name__ == "__main__":
    # Delegate to main() so the module stays importable without side effects.
    main()
| [
"sean@miscoranda.com"
] | sean@miscoranda.com | |
ab25523563b959e12a6ccce7ec3aef79dff2148d | dd88ea11e3a81532eaf92b8e92a0cf322761cc0b | /pyrolite_meltsutil/automation/org.py | ed44f319ece52a437469a510265446133bbc5ad6 | [
"MIT"
] | permissive | JustinGOSSES/pyrolite-meltsutil | acfdc8d5a9f98c67f8c1f8ec0929101743147b71 | 302b3d51c311c29803eb48ac9dc79a393b43644b | refs/heads/master | 2020-12-23T11:41:02.319498 | 2019-12-19T07:40:45 | 2019-12-19T07:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,101 | py | from pathlib import Path
from ..parse import read_envfile, read_meltsfile
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def make_meltsfolder(
    name, title=None, meltsfile=None, indir=None, env="./alphamelts_default_env.txt"
):
    """
    Create a folder for a given meltsfile, including the default environment file.
    From this folder, pass these to alphamelts with
    :code:`run_alphamelts.command -m <meltsfile> -f <envfile>`.

    Parameters
    -----------
    name : :class:`str`
        Name of the folder.
    title : :class:`str`
        Title of the experiment. This will be the meltsfile name.
    meltsfile : :class:`str`
        String containing meltsfile info.
    indir : :class:`str` | :class:`pathlib.Path`
        Path to the base directory to create melts folders in.
    env : :class:`str` | :class:`pathlib.Path`
        Path to a specific environment file to use as the default environment for the
        experiment.

    Returns
    --------
    :class:`pathlib.Path`
        Path to melts folder.

    Todo
    ------
        * Options for naming environment files
    """
    if indir is None:
        indir = Path("./")
    else:
        indir = Path(indir)
    assert meltsfile is not None
    name = str(name)  # need to pathify this!
    title = title or name  # fall back to the folder name when no title is given
    title = str(title)  # need to pathify this!
    experiment_folder = indir / name
    if not experiment_folder.exists():
        experiment_folder.mkdir(parents=True)
    # Normalise the meltsfile argument to its text content (path or string).
    meltsfile, mpath = read_meltsfile(meltsfile)
    assert experiment_folder.exists() and experiment_folder.is_dir()
    # <title>.melts holds the experiment definition.
    (experiment_folder / title).with_suffix(".melts").touch()
    with open(str((experiment_folder / title).with_suffix(".melts")), "w") as f:
        f.write(meltsfile)
    # environment.txt holds the alphamelts environment for this experiment.
    (experiment_folder / "environment").with_suffix(".txt").touch()
    env, epath = read_envfile(env, unset_variables=False)
    with open(str(experiment_folder / "environment.txt"), "w") as f:
        f.write(env)
    return experiment_folder  # return the folder name
| [
"morgan.j.williams@hotmail.com"
] | morgan.j.williams@hotmail.com |
d511155df42302d7dee46fb1c3caaee969a3d002 | 41586d36dd07c06860b9808c760e2b0212ed846b | /multimedia/misc/frei0r-plugins/actions.py | 0f10118074bac121f98cd12d31b19d5ce4bcc836 | [] | no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 558 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
    # Regenerate the autotools build system, then configure a shared-only build.
    autotools.autoreconf("-fi")
    autotools.configure("--enable-static=no")
    # Inject link-time flags into the generated libtool wrapper script.
    inarytools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
    # Compile using the default make target.
    autotools.make()
def install():
    # Stage the install under the package build directory (DESTDIR install).
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
| [
"zaryob.dev@gmail.com"
] | zaryob.dev@gmail.com |
b89b394f326b9bb99406cdee04b7a6d8538658cc | 8f2e6e38bb7ba2205cba57b0beae146d29f0ad3b | /chap13/chap13_3_selenium.py | 7a75060cf766af6f56aba7124bbe4f32240841db | [] | no_license | KimDoKy/WebScrapingWithPython | fa08ba83ba560d4f24cddb5e55de938a380dfec2 | bc7dd8a36d3ee0f8e3a13ae9fe0d074733b45938 | refs/heads/master | 2020-12-02T19:20:26.285450 | 2017-08-25T14:27:36 | 2017-08-25T14:27:36 | 96,326,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from selenium import webdriver
driver = webdriver.PhantomJS()  # headless browser driver
driver.get("http://en.wikipedia.org/wiki/Monty_Python")
# Once the page has loaded, its title should contain the article name.
assert "Monty Python" in driver.title
driver.close()
| [
"makingfunk0@gmail.com"
] | makingfunk0@gmail.com |
7ea89b3047d5c51986abea1b17fc9af667098cc7 | 7e86a9bd9ec1f82838d114bf71ad0f6d0f12152c | /venv/Lib/site-packages/stellar_sdk/xdr/create_claimable_balance_op.py | a08ac92193d3c35bbd17c06007cb7fd652e6f8b7 | [
"MIT"
] | permissive | yunoUNo/fini | b39688e7203d61f031f2ae9686845b0beccd9b2a | a833bc64a3aaf94f7268ec6eac690aa68327dd96 | refs/heads/master | 2023-08-05T17:42:48.726825 | 2021-09-29T13:30:32 | 2021-09-29T13:30:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from typing import List
from xdrlib import Packer, Unpacker
from .asset import Asset
from .claimant import Claimant
from .int64 import Int64
from ..exceptions import ValueError
__all__ = ["CreateClaimableBalanceOp"]
class CreateClaimableBalanceOp:
    """
    XDR Source Code
    ----------------------------------------------------------------
    struct CreateClaimableBalanceOp
    {
        Asset asset;
        int64 amount;
        Claimant claimants<10>;
    };
    ----------------------------------------------------------------
    """

    def __init__(self, asset: Asset, amount: Int64, claimants: List[Claimant],) -> None:
        # The XDR schema bounds the array (`claimants<10>`); enforce it eagerly.
        if claimants and len(claimants) > 10:
            raise ValueError(
                f"The maximum length of `claimants` should be 10, but got {len(claimants)}."
            )
        self.asset = asset
        self.amount = amount
        self.claimants = claimants

    def pack(self, packer: Packer) -> None:
        # Serialise fields in schema order; the variable-length array is
        # prefixed with its element count.
        self.asset.pack(packer)
        self.amount.pack(packer)
        packer.pack_uint(len(self.claimants))
        for claimant in self.claimants:
            claimant.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "CreateClaimableBalanceOp":
        # Mirror of pack(): read fields back in the same order.
        asset = Asset.unpack(unpacker)
        amount = Int64.unpack(unpacker)
        length = unpacker.unpack_uint()
        claimants = []
        for _ in range(length):
            claimants.append(Claimant.unpack(unpacker))
        return cls(asset=asset, amount=amount, claimants=claimants,)

    def to_xdr_bytes(self) -> bytes:
        """Serialise this operation to raw XDR bytes."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "CreateClaimableBalanceOp":
        """Deserialise an operation from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Serialise this operation to base64-encoded XDR text."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "CreateClaimableBalanceOp":
        """Deserialise an operation from base64-encoded XDR text."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.asset == other.asset
            and self.amount == other.amount
            and self.claimants == other.claimants
        )

    def __str__(self):
        out = [
            f"asset={self.asset}",
            f"amount={self.amount}",
            f"claimants={self.claimants}",
        ]
        # NOTE(review): the list literal wrapped around ', '.join(out) puts
        # brackets in the repr; this matches the generator's output here, so
        # confirm upstream before "fixing" it.
        return f"<CreateClaimableBalanceOp {[', '.join(out)]}>"
| [
"quit5123@gmail.com"
] | quit5123@gmail.com |
665d8b464db158daf705f91e1bdc952158b59dad | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715222523.py | c8441e49fc6ed194a0b5e99c8c46baf123f714d4 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,861 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
    """Return the names of all restaurants in *filename* serving *cuisine*.

    The file lists each restaurant's name on one line with its cuisine type
    on the following line, so a line equal to *cuisine* means the preceding
    line is a matching restaurant name.

    Parameters: filename (str), cuisine (str)
    Returns: list of restaurant names (list of str)
    """
    # 'with' guarantees the file is closed even if reading raises (the
    # original close() call could be skipped on error).
    with open(filename, 'r') as file:
        content = file.readlines()
    listOfRestaurants = []
    for i, line in enumerate(content):
        if line.strip() == cuisine:
            # The restaurant name is on the line just above its cuisine.
            listOfRestaurants.append(content[i - 1].strip())
    return listOfRestaurants
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    """Map each cuisine type to the list of restaurants of that cuisine.

    Records occupy 4 lines each (name, cuisine, category, blank line).
    Keys appear in first-encounter order; restaurants keep file order.

    Parameters: filename (str)
    Returns: dict mapping cuisine (str) -> list of restaurant names (list)
    """
    # Original opened the file and never closed it; 'with' fixes the leak.
    with open(filename, 'r') as file:
        content = file.readlines()
    cuisine_map = {}
    # Single pass replaces the original's separate key-collection pass:
    # setdefault inserts each cuisine the first time it is seen.
    for i in range(0, len(content), 4):
        name = content[i].strip()
        cuisine = content[i + 1].strip()
        cuisine_map.setdefault(cuisine, []).append(name)
    return cuisine_map
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
    """Write a restaurant directory grouped into Fast Food and Sit-down.

    Input records occupy 4 lines (name, cuisine, category, blank).  Each
    entry is numbered within its group as "n. name - cuisine".  The output
    ends without a trailing newline after the last sit-down entry.

    Parameters: filename (str), outputFilename (str)
    Returns: None
    """
    # The original never closed either file; use context managers instead.
    with open(filename, 'r') as read_file:
        content = read_file.readlines()
    fastfood = []
    sitdown = []
    for i in range(2, len(content), 4):
        restaurant = content[i - 2].strip()
        cuisine = content[i - 1].strip()
        group = content[i].strip()
        # Number each entry within its own group (list length + 1).
        if group == 'Fast Food':
            fastfood.append('%d. %s - %s\n' % (len(fastfood) + 1, restaurant, cuisine))
        else:
            sitdown.append('%d. %s - %s' % (len(sitdown) + 1, restaurant, cuisine))
    with open(outputFilename, 'w') as write_file:
        write_file.write('Restaurant Directory' + '\n')
        write_file.write('Fast Food' + '\n')
        write_file.writelines(fastfood)
        write_file.write('Sit-down' + '\n')
        # join() reproduces the original "no newline after the last entry".
        write_file.write('\n'.join(sitdown))
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
    """Return (name, extra pay) tuples for employees working beyond *hour*.

    CSV columns: name, age, hourly wage, year hired, hours worked; the
    header row is skipped.  Extra pay is (hours worked - hour) * wage.

    Parameters: filename (str), hour (int)
    Returns: list of (name, extra money) tuples
    """
    overtime = []
    # The original leaked the file handle; 'with' closes it deterministically.
    with open(filename, 'r') as file:
        file.readline()  # skip the header row
        for row in file:
            fields = row.strip().split(',')
            name = fields[0]
            wage = int(fields[2])
            hours_worked = int(fields[4])
            if hours_worked > hour:
                overtime.append((name, (hours_worked - hour) * wage))
    return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
    """Return the average age of staff hired strictly before *year*.

    CSV columns: name, age, hourly wage, year hired, hours worked; the
    header row is skipped.  The result is rounded to 2 decimal places
    (the source line was truncated at "return round(averageAge", which was
    a syntax error; rounding to 2 places is the assumed intent).  Returns
    0.0 when no employee qualifies instead of raising ZeroDivisionError.

    Parameters: filename (str), year (int)
    Returns: average age of senior staff members (float)
    """
    total_age = 0.0
    count = 0
    # 'with' fixes the original's leaked file handle.
    with open(filename, 'r') as file:
        file.readline()  # skip the header row
        for row in file:
            fields = row.strip().split(',')
            age = int(fields[1])
            year_hired = int(fields[3])
            if year_hired < year:
                total_age += age
                count += 1
    if count == 0:
        return 0.0
    return round(total_age / count, 2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# Ad-hoc manual checks kept from development; only the last one runs.
# print(findCuisine('restaurants.txt', 'Mexican'))
#print(restaurantFilter('restaurants.txt'))
#print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
print(seniorStaffAverage('employees.csv', 2017))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
502fb8fda03e0e672e7875aada6fd4bd4571f503 | 9bf3aea78c25029ecfe6bca22c9b03cabc72d1d0 | /NellBasic/WrapUpNELLToTrecWebWithDesp.py | 8a2bb659ecf55b119df55f9ff45f8d0bf6188cdc | [] | no_license | xiongchenyan/Nell | 1acee1d4c4b577b4e6ddcf9f36375b3098501b0b | 621197cebfac9d3c99eac608ed4246d9a5b4a97f | refs/heads/master | 2016-09-06T09:59:01.557368 | 2014-09-09T15:00:36 | 2014-09-09T15:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | '''
Created on Dec 4, 2013
wrap up NELL, but with description extracted from CPL format
input must be sorted so that same concept in first column is grouped together
@author: cx
'''
import sys
from NellGeneralC import *
# Usage: <script> <sorted NELL input> <trecweb output file>
if 3 != len(sys.argv):
    print "NELL General sorted + output trec web file"
    sys.exit()
OneConcept = NellGeneralC()
cnt = 0      # concepts written so far
errcnt = 0   # concepts dropped because of decode errors
out = open(sys.argv[2],'w')
for line in open(sys.argv[1]):
    line = line.strip()
    # Append() returning False signals that `line` starts a new concept:
    # flush the finished one, then begin accumulating the new concept.
    if not OneConcept.Append(line):
        try:
            print >>out, OneConcept.OutTrecWeb().decode('utf8',"ignore")
        except UnicodeEncodeError:
            errcnt += 1
        cnt += 1
        if 0 == (cnt % 100):
            print "processed [%d] concepts [%d] decode error" %(cnt,errcnt)
        OneConcept = NellGeneralC()
        OneConcept.Append(line)
# NOTE(review): the final concept accumulated at EOF is never flushed to
# `out` -- confirm whether the input ends with a sentinel line.
print "finished [%d] [%d] err" %(cnt,errcnt)
out.close()
| [
"xiongchenyan@gmail.com"
] | xiongchenyan@gmail.com |
5ae6c3636ca578efd390bcb5a49eac47ea49725b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_348/ch81_2020_04_21_15_27_41_131319.py | f4b58d7777fad5addc78ae4bab64fe79ce4247e7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | def interseccao_valores (dicionario_1, dicionario_2):
lista = []
for v1 in dicionario_1.values() and v2 in dicionario_2.values():
if v1 == v2:
valor = v1
lista.append(valor)
return lista
| [
"you@example.com"
] | you@example.com |
aa7e93eb7358fdfa586a494296402d1f9590fc5c | cd7fe406ee8526901096c2a8a3239790faf69615 | /transmute_core/frameworks/aiohttp/swagger.py | d15ade471935994ace2df28df6c0e8af5fc1019f | [
"MIT"
] | permissive | pwesthagen/transmute-core | 4defb95c866dbfba29359cc4f92efc11aafb7fc1 | 4282d082377e522f5e60fe740d0cbe2315f76f50 | refs/heads/master | 2021-01-03T12:10:14.564392 | 2019-12-19T21:13:11 | 2019-12-19T21:13:11 | 240,078,641 | 0 | 0 | MIT | 2020-02-12T17:56:09 | 2020-02-12T17:56:09 | null | UTF-8 | Python | false | false | 2,024 | py | import json
from aiohttp import web
from transmute_core.swagger import (
generate_swagger_html,
get_swagger_static_root,
SwaggerSpec
)
STATIC_ROOT = "/_swagger/static"
APP_KEY = "_aiohttp_transmute_swagger"
def get_swagger_spec(app):
    """Return the SwaggerSpec cached on *app*, creating it on first access."""
    try:
        return app[APP_KEY]
    except KeyError:
        spec = SwaggerSpec()
        app[APP_KEY] = spec
        return spec
def add_swagger(app, json_route, html_route):
    """Convenience helper wiring up both swagger endpoints.

    Registers the swagger.json handler at *json_route* and the HTML
    documentation page at *html_route*.
    """
    json_handler = create_swagger_json_handler(app)
    app.router.add_route('GET', json_route, json_handler)
    add_swagger_api_route(app, html_route, json_route)
def add_swagger_api_route(app, target_route, swagger_json_route):
    """Mount the swagger-ui page together with its static assets.

    app: the aiohttp application object.
    target_route: path at which the HTML page is served.
    swagger_json_route: path where the swagger JSON definition is expected.
    """
    # Render the page once up front; requests just return the cached bytes.
    page = generate_swagger_html(STATIC_ROOT, swagger_json_route).encode("utf-8")

    async def swagger_ui(request):
        return web.Response(body=page, content_type="text/html")

    app.router.add_route("GET", target_route, swagger_ui)
    app.router.add_static(STATIC_ROOT, get_swagger_static_root())
def create_swagger_json_handler(app, **kwargs):
    """Build an aiohttp handler that serves the app's swagger definition.

    The definition is generated and JSON-encoded once, at creation time;
    the returned coroutine simply replays the cached bytes.
    """
    definition = get_swagger_spec(app).swagger_definition(**kwargs)
    payload = json.dumps(definition).encode("UTF-8")

    async def swagger(request):
        # The CORS header lets external tools (e.g. swagger.io) fetch the spec.
        return web.Response(
            headers={"Access-Control-Allow-Origin": "*"},
            body=payload,
            content_type="application/json",
        )

    return swagger
| [
"yusuke@tsutsumi.io"
] | yusuke@tsutsumi.io |
0011772bccdfdfb1d33255696e97dd012b166c54 | 057d2d1e2a78fc89851154e87b0b229e1e1f003b | /venv/Lib/site-packages/keystoneclient/fixture/v3.py | 596f3e2b53351713b0b699df9b992bbfb144b802 | [
"Apache-2.0"
] | permissive | prasoon-uta/IBM-Cloud-Secure-File-Storage | 276dcbd143bd50b71121a73bc01c8e04fe3f76b0 | 82a6876316715efbd0b492d0d467dde0ab26a56b | refs/heads/master | 2022-12-13T00:03:31.363281 | 2018-02-22T02:24:11 | 2018-02-22T02:24:11 | 122,420,622 | 0 | 2 | Apache-2.0 | 2022-12-08T05:15:19 | 2018-02-22T02:26:48 | Python | UTF-8 | Python | false | false | 889 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1.fixture import v3
Token = v3.Token
"""A V3 Keystone token that can be used for testing.
An alias of :py:exc:`keystoneauth1.fixture.v3.Token`
"""
V3FederationToken = v3.V3FederationToken
"""A V3 Keystone Federation token that can be used for testing.
An alias of :py:exc:`keystoneauth1.fixture.v3.V3FederationToken`
"""
| [
"prasoon1812@gmail.com"
] | prasoon1812@gmail.com |
651cbe93c219554877208f789d5b3fe53c1e9c23 | 617df9a44a819edbc2ebcb2d5735e7bc96e0b765 | /lot/trees/migrations/0002_auto_20200429_1325.py | 2bbc21c6ba7bf59b073fc309ef126b16ee438901 | [
"BSD-3-Clause"
] | permissive | Ecotrust/forestplanner | 30856c4fa217f7a543ce6a4d901c53dbf2170555 | 5674741389945e9b3db068682b64f400e10efe8e | refs/heads/main | 2023-06-26T01:07:11.502948 | 2021-12-23T19:48:50 | 2021-12-23T19:48:50 | 2,982,832 | 25 | 10 | BSD-3-Clause | 2023-03-13T22:17:10 | 2011-12-14T20:34:37 | JavaScript | UTF-8 | Python | false | false | 2,544 | py | # Generated by Django 2.2.12 on 2020-04-29 13:25
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.12: alters the sharing_groups M2M fields
    # across the trees app models and the scenario.input_rxs JSON field.

    dependencies = [
        ('trees', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='carbongroup',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_carbongroup_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='forestproperty',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_forestproperty_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='myrx',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_myrx_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='scenario',
            name='input_rxs',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True, verbose_name='Prescriptions associated with each stand'),
        ),
        migrations.AlterField(
            model_name='scenario',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_scenario_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='scenariostand',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_scenariostand_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='stand',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_stand_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
        migrations.AlterField(
            model_name='strata',
            name='sharing_groups',
            field=models.ManyToManyField(blank=True, editable=False, related_name='trees_strata_related', to='auth.Group', verbose_name='Share with the following groups'),
        ),
    ]
| [
"ryan.d.hodges@gmail.com"
] | ryan.d.hodges@gmail.com |
1a798f2a8157a2fcdbdfed698a421ef438d48931 | 30dbb8c5a5cce9dfea904924f00a1451abd0c88b | /stack2/토마토.py | b5672341712c7b34e2b9f616f8fbd084f68ddb09 | [] | no_license | gksrb2656/AlgoPractice | 7eac983509de4c5f047a880902253e477f4ca27c | 5285479625429b8ef46888c8611dc132924833b7 | refs/heads/master | 2020-12-22T17:20:33.677147 | 2020-09-22T16:05:53 | 2020-09-22T16:05:53 | 236,872,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | from collections import deque
dr = [1,-1,0,0]
dc = [0,0,1,-1]
def BFS(G, st=None):
    """Multi-source BFS writing ripening "day" numbers into grid *G* in place.

    G: grid of ints -- 1 marks an initially ripe cell (day 1), 0 unripe,
       other values (e.g. -1 walls) are never overwritten.
    st: optional list of [row, col] start cells.  When omitted, every cell
        equal to 1 is used -- this makes the module's call ``BFS(tomato)``
        (which passes only the grid) work; the original two-parameter
        signature raised TypeError there.
    """
    rows, cols = len(G), len(G[0])
    if st is None:
        st = [[r, c] for r in range(rows) for c in range(cols) if G[r][c] == 1]
    # Seed every start at day 1; a local deque replaces the module-global Q.
    queue = deque((cell, 1) for cell in st)
    dr = (1, -1, 0, 0)
    dc = (0, 0, 1, -1)
    while queue:
        (r, c), day = queue.popleft()
        for k in range(4):
            nr, nc = r + dr[k], c + dc[k]
            if not (0 <= nr < rows and 0 <= nc < cols):
                continue
            # Ripen unripe neighbours, or relax ones reached with a larger
            # day value.  (The original's `visit` list only held the start
            # cells, which these value tests already exclude, so it is
            # dropped.)
            if G[nr][nc] == 0 or G[nr][nc] > day + 1:
                G[nr][nc] = day + 1
                queue.append(([nr, nc], day + 1))
def find(G):
    """Return the day on which the last cell ripened, or -1 if any cell
    stayed unripe (value 0).

    G holds BFS day numbers (initially ripe cells are 1), so the answer is
    the maximum value minus one.  Iterates the grid itself instead of the
    module globals M and N the original depended on.
    """
    ans = 0
    for row in G:
        for value in row:
            if value == 0:
                return -1
            ans = max(ans, value)
    return ans - 1
def st_p(G):
    """Return the [row, col] positions of every initially ripe cell (value 1).

    The original built the list but fell off the end without returning it,
    so callers always received None.
    """
    return [[r, c]
            for r in range(len(G))
            for c in range(len(G[0]))
            if G[r][c] == 1]
# def BFS(G):
# for i in range(M):
# visit = []
# for j in range(N):
# if tomato[i][j] == 1:
# Q.append([i,j])
# # visit.append([i,j])
# while Q:
# r,c = Q.popleft()
# visit.append([r,c])
# if r+1<=M-1 and [r+1,c] not in visit and tomato[r+1][c] == 0:
# Q.append([r+1,c])
# tomato[r+1][c] = tomato[r][c]+1
# elif r+1<=M-1 and [r+1,c] not in visit and tomato[r+1][c] > tomato[r][c]+1:
# Q.append([r + 1, c])
# tomato[r + 1][c] = tomato[r][c] + 1
#
# if r-1>=0 and [r-1,c] not in visit and tomato[r-1][c] == 0:
# Q.append([r-1,c])
# tomato[r-1][c] = tomato[r][c]+1
# elif r - 1 >= 0 and [r - 1, c] not in visit and tomato[r - 1][c] > tomato[r][c]+1:
# Q.append([r - 1, c])
# tomato[r - 1][c] = tomato[r][c] + 1
#
# if c+1<=N-1 and [r,c+1] not in visit and tomato[r][c+1] == 0:
# Q.append([r,c+1])
# tomato[r][c+1] = tomato[r][c]+1
# elif c+1 <= N-1 and [r, c+1] not in visit and tomato[r][c+1] > tomato[r][c]+1:
# Q.append([r, c + 1])
# tomato[r][c+1] = tomato[r][c] + 1
#
# if c-1>=0 and [r,c-1] not in visit and tomato[r][c-1] == 0:
# Q.append([r,c-1])
# tomato[r][c-1] = tomato[r][c]+1
# elif c-1>=0 and [r, c-1] not in visit and tomato[r][c-1] > tomato[r][c]+1:
# Q.append([r, c - 1])
# tomato[r][c-1] = tomato[r][c] + 1
# Input: N = number of columns, M = number of rows, then the M x N grid.
N, M = map(int, input().split())
tomato = [list(map(int, input().split())) for _ in range(M)]
Q = deque()
# visit = []
# NOTE(review): BFS is invoked with the grid only -- confirm its signature
# accepts a single argument.  BFS fills in ripening days in place.
BFS(tomato)
print(find(tomato))
| [
"rbcjswkd@gmail.com"
] | rbcjswkd@gmail.com |
be301a8455f88a99119f45995e052482a7a716b8 | 6ecebb04354cc985d9b1ff3ef632137ba104c70e | /example/curvedsky_reconstruction/lens_reconstruction_EB-iter.py | d99e7c298291d8d57001a8856cbfcfcb56be342c | [] | no_license | msyriac/cmblensplus | 2299b38462c1425ab3d7297e0d063d456e4e6070 | 819bb3d50682a54bdf49eeba0628b527457c5616 | refs/heads/master | 2023-01-04T15:03:27.472447 | 2020-11-03T14:25:45 | 2020-11-03T14:25:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | #!/usr/bin/env python
# coding: utf-8
# # A reconstruction nosie dependence on CMB white noise level
# This code compute normalization and power spectrum of quadratic estiamtors with varying CMB noise level
# In[1]:
# external
import numpy as np
from matplotlib.pyplot import *
# from cmblensplus/wrap/
import basic
import curvedsky as cs
# from cmblensplus/utils/
import plottools as plc
# First define parameters
# In[3]:
Tcmb = 2.726e6  # CMB temperature in uK (2.726 K); spectra below are divided by Tcmb**2
Lmin, Lmax = 2, 4096  # maximum multipole of output normalization
rlmin, rlmax = 100, 4096  # CMB multipole range for reconstruction
L = np.linspace(0,Lmax,Lmax+1)
Lfac = (L*(L+1.))**2/(2*np.pi)
ac2rad = np.pi/10800.  # arcminutes -> radians

# Load arrays of CMB unlensed and lensed Cls. Unlensed Cls are not used for now.
# The Cls should not be multiplied by any factors and should not have units.

# In[4]:

# ucl is an array of shape [0:5,0:rlmax+1] and ucl[0,:] = TT, ucl[1,:] = EE, ucl[2,:] = TE, lcl[3,:] = phiphi, lcl[4,:] = Tphi
ucl = basic.aps.read_cambcls('../data/unlensedcls.dat',2,rlmax,5)/Tcmb**2 # TT, EE, TE, pp, Tp

# lcl is an array of shape [0:4,0:rlmax+1] and lcl[0,:] = TT, lcl[1,:] = EE, lcl[2,:] = BB, and lcl[3,:] = TE
lcl = basic.aps.read_cambcls('../data/lensedcls.dat',2,rlmax,4,bb=True)/Tcmb**2 # TT, EE, BB, TE

# Loop over SNR calculation

# In[ ]:

sigs = [3.,1.,.5,.3,.1,.05]  # noise levels to scan (consistent with uK-arcmin given the ac2rad/Tcmb scaling)
#sigs = [5.]
snr = np.zeros(len(sigs))
for i, sig in enumerate(sigs):
    # Build white-noise spectra: index 0 temperature, 1-2 polarization (x2).
    nl = np.zeros((4,rlmax+1))
    nl[0,:] = (sig*ac2rad/Tcmb)**2
    nl[1,:] = 2*nl[0,:]
    nl[2,:] = 2*nl[0,:]
    # Observed spectra = lensed signal + noise.
    ocl = lcl + nl
    Ag, __ = cs.norm_lens.qeb_iter(Lmax,rlmax,rlmin,rlmax,rlmin,rlmax,lcl[1,:],ocl[1,:],ocl[2,:],ucl[3,:])
# In[ ]:
| [
"you@example.com"
] | you@example.com |
67368adaaf3dd7470553568473a2a0979294887e | 6a14512742f448efd2ae2bf86c15e7cb357dcf60 | /_unittests/ut_documentation/test_nb_artificiel_token.py | 6221fac1a3a5799619ef122cd635da633b22aeeb | [
"MIT"
] | permissive | sdpython/papierstat | c2dd47c10282deba528f321c323052baecf16b8c | 8c0772725a7dce2e88946dac82e44318173c1969 | refs/heads/master | 2023-03-16T03:03:48.594789 | 2023-03-04T11:22:19 | 2023-03-04T11:22:19 | 119,205,940 | 10 | 3 | MIT | 2022-05-17T22:52:12 | 2018-01-27T21:56:28 | Jupyter Notebook | UTF-8 | Python | false | false | 1,245 | py | # -*- coding: utf-8 -*-
"""
@brief test log(time=20s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import add_missing_development_version, skipif_travis, skipif_appveyor
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
import papierstat
class TestNotebookArtificielToken(unittest.TestCase):
    """Executes the 'artificiel_tokenize' notebook and records coverage."""

    def setUp(self):
        # jyquickhelper is needed by the notebook; pull a development
        # version if it is not installed.
        add_missing_development_version(["jyquickhelper"], __file__, hide=True)

    @skipif_travis("ModuleNotFoundError: No module named 'google_compute_engine'")
    @skipif_appveyor("ValueError: 93066 exceeds max_map_len(32768)")
    def test_notebook_artificiel_token(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        # The notebook relies on NLTK corpora; download them up front.
        import nltk
        nltk.download('punkt')
        nltk.download('stopwords')
        self.assertTrue(papierstat is not None)
        folder = os.path.join(os.path.dirname(__file__),
                              "..", "..", "_doc", "notebooks", "lectures")
        test_notebook_execution_coverage(
            __file__, "artificiel_tokenize", folder, 'papierstat', copy_files=[], fLOG=fLOG)
if __name__ == "__main__":
unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
b82a8ed7ea79dab35116808e2145b91935451c3e | 798a81fb5ebf8afa28a6ab06f8d3bf85f753e1de | /tests/test_util.py | f34a01f6963bd4f00c70f39f20106fb980f0d042 | [
"Apache-2.0"
] | permissive | erinmacf/sentence-transformers | 5d5c592126747e95b29eb5c966db6cc0d8b7ef91 | e59a07600b73d3a856778278d212dea9e8598272 | refs/heads/master | 2023-02-22T07:30:46.692044 | 2021-01-26T12:38:59 | 2021-01-26T12:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | from sentence_transformers import util, SentenceTransformer
import unittest
import numpy as np
import sklearn
import torch
class UtilTest(unittest.TestCase):
    def test_pytorch_cos_sim(self):
        """Tests the correct computation of util.pytorch_cos_scores"""
        a = np.random.randn(50, 100)
        b = np.random.randn(50, 100)

        # scikit-learn's pairwise cosine similarity serves as the reference.
        sklearn_pairwise = sklearn.metrics.pairwise.cosine_similarity(a, b)
        pytorch_cos_scores = util.pytorch_cos_sim(a, b).numpy()
        for i in range(len(sklearn_pairwise)):
            for j in range(len(sklearn_pairwise[i])):
                assert abs(sklearn_pairwise[i][j] - pytorch_cos_scores[i][j]) < 0.001

    def test_semantic_search(self):
        """Tests util.semantic_search function"""
        num_queries = 20
        num_k = 10

        # Chunk sizes deliberately do not divide the corpus/query counts,
        # exercising the chunked-search bookkeeping.
        doc_emb = torch.tensor(np.random.randn(1000, 100))
        q_emb = torch.tensor(np.random.randn(num_queries, 100))
        hits = util.semantic_search(q_emb, doc_emb, top_k=num_k, query_chunk_size=5, corpus_chunk_size=17)
        assert len(hits) == num_queries
        assert len(hits[0]) == num_k

        #Sanity Check of the results: compare against a full top-k
        # computed directly from the cosine-similarity matrix.
        cos_scores = util.pytorch_cos_sim(q_emb, doc_emb)
        cos_scores_values, cos_scores_idx = cos_scores.topk(num_k)
        cos_scores_values = cos_scores_values.cpu().tolist()
        cos_scores_idx = cos_scores_idx.cpu().tolist()
        for qid in range(num_queries):
            for hit_num in range(num_k):
                assert hits[qid][hit_num]['corpus_id'] == cos_scores_idx[qid][hit_num]
                assert np.abs(hits[qid][hit_num]['score'] - cos_scores_values[qid][hit_num]) < 0.001

    def test_paraphrase_mining(self):
        # Downloads a pretrained model, so this test needs network access.
        model = SentenceTransformer('paraphrase-distilroberta-base-v1')
        sentences = [
            "This is a test", "This is a test!",
            "The cat sits on mat", "The cat sits on the mat", "On the mat a cat sits",
            "A man eats pasta", "A woman eats pasta", "A man eats spaghetti"
        ]
        duplicates = util.paraphrase_mining(model, sentences)

        # Every confidently-scored pair must be one of the known paraphrases.
        for score, a, b in duplicates:
            if score > 0.5:
                assert (a,b) in [(0,1), (2,3), (2,4), (3,4), (5,6), (5,7), (6,7)]
if "__main__" == __name__:
unittest.main() | [
"rnils@web.de"
] | rnils@web.de |
cd91e4611fa1336a44108f41ee9bc4ac7ffc46f4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_long.py | f5933642bd19934a6188398881b7bbafec50e23b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py |
#class header
class _LONG():
def __init__(self,):
self.name = "LONG"
self.definitions = [u'used to mean "(for) a long time", especially in questions and negative sentences: ', u'a long period of time before or after something: ', u'used with the past participle or the -ing form of the verb to mean that a state or activity has continued for a long time: ', u'used to say that something must happen before something else can happen: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5c50c25438be470d806d2fcc116d5292be6448ee | 05148c0ea223cfc7ed9d16234ab3e6bb40885e9d | /Packages/matplotlib-2.2.2/lib/matplotlib/tests/test_skew.py | 628506f4db481a2f0e2ce90dd76fec452a33eb7f | [
"MIT"
] | permissive | NightKirie/NCKU_NLP_2018_industry3 | 9ee226e194287fd9088429f87c58c874e050a8b3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | refs/heads/master | 2021-06-05T05:33:09.510647 | 2018-07-05T10:19:47 | 2018-07-05T10:19:47 | 133,680,341 | 1 | 4 | MIT | 2020-05-20T16:29:54 | 2018-05-16T14:43:38 | Python | UTF-8 | Python | false | false | 7,139 | py | """
Testing that skewed axes properly work
"""
from __future__ import absolute_import, division, print_function
import itertools
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
import matplotlib.patches as mpatch
from matplotlib.projections import register_projection
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
    """Tick whose upper/lower halves are drawn only when its location lies
    inside the corresponding (upper/lower) x-interval of the skewed axes.
    """

    def update_position(self, loc):
        # This ensures that the new value of the location is set before
        # any other updates take place
        self._loc = loc
        super(SkewXTick, self).update_position(loc)

    def _has_default_loc(self):
        # True while no explicit data location has been assigned.
        return self.get_loc() is None

    def _need_lower(self):
        # Lower-half artists are shown when the location falls inside the
        # lower x-limits (or while the location is still unset).
        return (self._has_default_loc() or
                transforms.interval_contains(self.axes.lower_xlim,
                                             self.get_loc()))

    def _need_upper(self):
        # Same as _need_lower, but tested against the upper x-limits.
        return (self._has_default_loc() or
                transforms.interval_contains(self.axes.upper_xlim,
                                             self.get_loc()))

    # Each *On property gates the stored flag with a visibility test, so a
    # tick outside its interval contributes nothing when drawn; the setters
    # keep the plain flag semantics the base class expects.
    @property
    def gridOn(self):
        return (self._gridOn and (self._has_default_loc() or
                transforms.interval_contains(self.get_view_interval(),
                                             self.get_loc())))

    @gridOn.setter
    def gridOn(self, value):
        self._gridOn = value

    @property
    def tick1On(self):
        return self._tick1On and self._need_lower()

    @tick1On.setter
    def tick1On(self, value):
        self._tick1On = value

    @property
    def label1On(self):
        return self._label1On and self._need_lower()

    @label1On.setter
    def label1On(self, value):
        self._label1On = value

    @property
    def tick2On(self):
        return self._tick2On and self._need_upper()

    @tick2On.setter
    def tick2On(self, value):
        self._tick2On = value

    @property
    def label2On(self):
        return self._label2On and self._need_upper()

    @label2On.setter
    def label2On(self, value):
        self._label2On = value

    def get_view_interval(self):
        # Delegate to the axis so the skewed-interval logic lives in one place.
        return self.axes.xaxis.get_view_interval()
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
    """X axis that manufactures SkewXTick instances and reports a view
    interval stretching from the upper axis' left edge to the lower
    axis' right edge."""

    def _get_tick(self, major):
        # Hand out our visibility-culling tick instead of the stock XTick.
        return SkewXTick(self.axes, None, '', major=major)

    def get_view_interval(self):
        upper = self.axes.upper_xlim
        lower = self.axes.lower_xlim
        return upper[0], lower[1]
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
    """Spine whose endpoints snap to the skewed data range: the top spine
    uses the upper x limits, every other spine the lower x limits."""

    def _adjust_location(self):
        if self.spine_type == 'top':
            limits = self.axes.upper_xlim
        else:
            limits = self.axes.lower_xlim
        vertices = self._path.vertices
        vertices[:, 0] = limits
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewXAxes(Axes):
    """Axes subclass whose x axis is sheared by 30 degrees (selected via
    ``projection='skewx'``)."""

    # The projection must specify a name. This will be used be the
    # user to select the projection, i.e. ``subplot(111,
    # projection='skewx')``.
    name = 'skewx'

    def _init_axis(self):
        # Taken from Axes and modified to use our modified X-axis
        self.xaxis = SkewXAxis(self)
        self.spines['top'].register_axis(self.xaxis)
        self.spines['bottom'].register_axis(self.xaxis)
        self.yaxis = maxis.YAxis(self)
        self.spines['left'].register_axis(self.yaxis)
        self.spines['right'].register_axis(self.yaxis)

    def _gen_axes_spines(self):
        # Only the top spine needs to be skew-aware; the rest are stock
        # linear spines.
        spines = {'top': SkewSpine.linear_spine(self, 'top'),
                  'bottom': mspines.Spine.linear_spine(self, 'bottom'),
                  'left': mspines.Spine.linear_spine(self, 'left'),
                  'right': mspines.Spine.linear_spine(self, 'right')}
        return spines

    def _set_lim_and_transforms(self):
        """
        This is called once when the plot is created to set up all the
        transforms for the data, text and grids.
        """
        rot = 30  # skew angle, in degrees

        # Get the standard transform setup from the Axes base class
        Axes._set_lim_and_transforms(self)

        # Need to put the skew in the middle, after the scale and limits,
        # but before the transAxes. This way, the skew is done in Axes
        # coordinates thus performing the transform around the proper origin
        # We keep the pre-transAxes transform around for other users, like the
        # spines for finding bounds
        self.transDataToAxes = (self.transScale +
                                (self.transLimits +
                                 transforms.Affine2D().skew_deg(rot, 0)))

        # Create the full transform from Data to Pixels
        self.transData = self.transDataToAxes + self.transAxes

        # Blended transforms like this need to have the skewing applied using
        # both axes, in axes coords like before.
        self._xaxis_transform = (transforms.blended_transform_factory(
            self.transScale + self.transLimits,
            transforms.IdentityTransform()) +
            transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes

    @property
    def lower_xlim(self):
        # Data x-limits along the (unskewed) bottom edge of the axes.
        return self.axes.viewLim.intervalx

    @property
    def upper_xlim(self):
        # Data x-limits along the skewed top edge: map the axes-space top
        # corners back through the inverse skew into data coordinates.
        pts = [[0., 1.], [1., 1.]]
        return self.transDataToAxes.inverted().transform(pts)[:, 0]

# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewXAxes)
@image_comparison(baseline_images=['skew_axes'], remove_text=True)
def test_set_line_coll_dash_image():
    # NOTE(review): the function name looks copy-pasted from another test;
    # it actually exercises basic rendering of the 'skewx' projection
    # (compared against the 'skew_axes' baseline image).
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='skewx')
    ax.set_xlim(-50, 50)
    ax.set_ylim(50, -50)  # deliberately reversed y limits

    ax.grid(True)

    # An example of a slanted line at constant X
    ax.axvline(0, color='b')
@image_comparison(baseline_images=['skew_rects'], remove_text=True)
def test_skew_rectangle():
    # Draw the same 2x2 rectangle once per subplot under every combination
    # of x/y skews in {-135, -45, 0, 45, 135} degrees (5x5 grid).
    fix, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(8, 8))
    axes = axes.flat

    rotations = list(itertools.product([-3, -1, 0, 1, 3], repeat=2))

    # Limits/aspect are shared across all subplots via sharex/sharey.
    axes[0].set_xlim([-3, 3])
    axes[0].set_ylim([-3, 3])
    axes[0].set_aspect('equal', share=True)

    for ax, (xrots, yrots) in zip(axes, rotations):
        xdeg, ydeg = 45 * xrots, 45 * yrots
        t = transforms.Affine2D().skew_deg(xdeg, ydeg)

        ax.set_title('Skew of {0} in X and {1} in Y'.format(xdeg, ydeg))
        # Skew is applied in data space, before the axes' own transData.
        ax.add_patch(mpatch.Rectangle([-1, -1], 2, 2,
                                      transform=t + ax.transData,
                                      alpha=0.5, facecolor='coral'))

    plt.subplots_adjust(wspace=0, left=0.01, right=0.99, bottom=0.01, top=0.99)
| [
"qwer55113322@gmail.com"
] | qwer55113322@gmail.com |
4cc64b788664f79f7bb9a60d514e3e84bda1b654 | fb72aef4db762749f3ac4bc08da36d6accee0697 | /modules/photons_tile_paint/balls.py | 5707fa63c995721340e29f0b98272b832ade14b2 | [
"MIT"
] | permissive | xbliss/photons-core | 47698cc44ea80354e0dcabe42d8d370ab0623f4b | 3aca907ff29adffcab4fc22551511c5d25b8c2b7 | refs/heads/master | 2022-11-07T12:33:09.951104 | 2020-05-07T09:10:35 | 2020-05-07T09:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,453 | py | from photons_tile_paint.options import (
AnimationOptions,
split_by_comma,
hue_range_spec,
HueRange,
normalise_speed_options,
)
from photons_tile_paint.animation import Animation, Finish
from photons_themes.theme import ThemeColor as Color
from photons_themes.canvas import Canvas
from delfick_project.norms import dictobj, sb
from collections import defaultdict
import random
import math
class TileBallsOptions(AnimationOptions):
    """Options controlling the bouncing-balls tile animation."""

    # -1 means "run forever".
    num_iterations = dictobj.Field(sb.integer_spec, default=-1)
    random_orientations = dictobj.Field(sb.boolean, default=False)

    # Hue ranges the balls may be coloured from.
    ball_hues = dictobj.NullableField(split_by_comma(hue_range_spec()), default=[])

    num_balls = dictobj.Field(sb.integer_spec, default=5)
    fade_amount = dictobj.Field(sb.float_spec, default=0.02)
    min_speed = dictobj.Field(sb.float_spec, default=0.6)
    max_speed = dictobj.Field(sb.float_spec, default=0.8)

    def final_iteration(self, iteration):
        """True once the iteration budget is spent; never True when
        num_iterations is -1 (run forever)."""
        return self.num_iterations != -1 and self.num_iterations <= iteration
class Boundary:
    """The set of integer pixels covered by a collection of tile coords.

    Each coord is ``((left, top), (width, height))``: columns span
    ``left .. left+width-1`` and rows span ``top-height .. top-1``.
    """

    def __init__(self, coords):
        self.points = {}
        for (left, top), (width, height) in coords:
            for col in range(left, left + width):
                for row in range(top - height, top):
                    self.points[(col, row)] = True
        self.position_points = list(self.points)

    def random_coord(self):
        """Return a uniformly random pixel inside the boundary."""
        return random.choice(self.position_points)

    def is_going_outside(self, now, nxt, dx, dy):
        """Return ``(hit_x, hit_y)``: whether a ball occupying pixels
        ``now`` and about to occupy pixels ``nxt`` while travelling
        ``(dx, dy)`` would cross the boundary on the x and/or y axis.

        An axis counts as "outside" when at least two pixels of the swept
        bounding box lie past the ball's leading edge (in the direction of
        travel) and are neither boundary pixels nor current ball pixels.
        """
        swept = now + nxt
        xs = [x for x, _ in swept]
        ys = [y for _, y in swept]

        # Leading edge of the current position along each axis.
        edge_x = min(x for x, _ in now) if dx < 0 else max(x for x, _ in now)
        edge_y = min(y for _, y in now) if dy < 0 else max(y for _, y in now)

        def past_x(col):
            return col < edge_x if dx < 0 else col > edge_x

        def past_y(row):
            return row < edge_y if dy < 0 else row > edge_y

        outside_x = 0
        outside_y = 0
        for col in range(min(xs), max(xs) + 1):
            for row in range(min(ys), max(ys) + 1):
                point = (col, row)
                if point in self.points or point in now:
                    continue
                if past_x(col):
                    outside_x += 1
                if past_y(row):
                    outside_y += 1

        return outside_x >= 2, outside_y >= 2
class Ball:
    """A 2x2-pixel ball travelling at (dx, dy) per tick inside a Boundary.

    ``(x, y)`` is the ball's reference corner; the four covered pixels are
    the floors of the four corner coordinates.
    """

    def __init__(self, boundary, hue, rate_x, rate_y):
        self.hue = hue
        self.boundary = boundary
        self.x, self.y = self.boundary.random_coord()
        self.dx = rate_x
        self.dy = rate_y
        # Extra jitter added on top of (dx, dy) each step; re-rolled on bounce.
        self.extrax = 0
        self.extray = 0
        self.maybe_alter_course()

    def maybe_alter_course(self):
        """Bounce off the boundary: reverse the offending velocity
        component(s) and re-roll the jitter when the next step would take
        the ball outside the boundary."""
        points_now = [(math.floor(x), math.floor(y)) for x, y in self.points]
        points_next = [(math.floor(x), math.floor(y)) for x, y in self.next_points]
        outside_x, outside_y = self.boundary.is_going_outside(
            points_now, points_next, self.dx, self.dy
        )
        if not outside_x and not outside_y:
            return

        if outside_x:
            self.dx *= -1
        if outside_y:
            self.dy *= -1

        # BUG FIX: these were previously assigned to self.extra_x/self.extra_y
        # (with underscores), attributes that next_point() never reads, so the
        # jitter was dead code and the sign fixups below also operated on the
        # wrong names.  Use the extrax/extray attributes set up in __init__
        # and consumed by next_point().
        self.extrax = random.randrange(0, 5) / 10
        self.extray = random.randrange(0, 5) / 10

        # Give the jitter the same sign as the (possibly just reversed)
        # direction of travel.
        if (self.dy < 0) ^ (self.extray < 0):
            self.extray *= -1
        if (self.dx < 0) ^ (self.extrax < 0):
            self.extrax *= -1

    @property
    def top(self):
        return self.y

    @property
    def bottom(self):
        return self.y - 1

    @property
    def right(self):
        return self.x + 1

    @property
    def left(self):
        return self.x

    @property
    def points(self):
        """The four corner coordinates currently covered by the ball."""
        return [
            (self.x, self.y),
            (self.x, self.y - 1),
            (self.x + 1, self.y),
            (self.x + 1, self.y - 1),
        ]

    @property
    def next_points(self):
        """The four corner coordinates after one more step."""
        x, y = self.next_point()
        return [(x, y), (x, y - 1), (x + 1, y), (x + 1, y - 1)]

    def next_point(self):
        """Reference corner after applying velocity plus jitter."""
        x = self.x + self.dx + self.extrax
        y = self.y + self.dy + self.extray
        return x, y

    def progress(self):
        """Advance one tick, then bounce if necessary."""
        self.x, self.y = self.next_point()
        self.maybe_alter_course()

    def pixels(self):
        """Yield ((col, row), Color) for every pixel the ball covers."""
        for x, y in self.points:
            yield (math.floor(x), math.floor(y)), Color(self.hue, 1, 1, 3500)
class TileBallsState:
    """Mutable animation state: the boundary, the live balls, and the
    canvas each frame is drawn onto."""

    def __init__(self, coords, options):
        self.options = options
        self.boundary = Boundary(coords)
        self.balls = []
        self.ensure_enough_balls()
        self.canvas = Canvas()

    def ensure_enough_balls(self):
        # Top the pool back up to options.num_balls (balls are retired on
        # collision in make_canvas).
        need = self.options.num_balls - len(self.balls)
        if need > 0:
            self.balls.extend([self.make_ball() for _ in range(need)])

    def make_ball(self):
        """Create a ball with a random hue and a random per-axis speed in
        [min_speed, max_speed], each axis randomly negated."""
        if self.options.min_speed == self.options.max_speed:
            # Degenerate range: both components use the single configured
            # speed (min == max here, so the two reads are equal).
            rate_x = self.options.min_speed
            rate_y = self.options.max_speed
        else:
            # Work in hundredths so randint can sample the float range.
            mn = int(self.options.min_speed * 100)
            mx = int(self.options.max_speed * 100)
            rate_x = random.randint(mn, mx) / 100
            rate_y = random.randint(mn, mx) / 100

        # 50/50 chance per axis of travelling in the negative direction.
        if random.randrange(0, 100) < 50:
            rate_x *= -1
        if random.randrange(0, 100) < 50:
            rate_y *= -1

        ball_hue = random.choice(self.options.ball_hues)
        return Ball(self.boundary, ball_hue.make_hue(), rate_x, rate_y)

    def tick(self):
        # Advance every ball one step; returns self so it can serve as the
        # next animation state.
        for ball in self.balls:
            ball.progress()
        return self

    def make_canvas(self):
        """Render one frame: fade the previous frame, draw the balls, flash
        white and retire any balls sharing a pixel, then respawn."""
        # Fade everything already on the canvas; drop fully-dark pixels.
        for point, pixel in list(self.canvas):
            pixel.brightness -= self.options.fade_amount
            if pixel.brightness < 0:
                del self.canvas[point]

        # Draw the balls, recording which balls touched each pixel.
        pixels = defaultdict(list)
        for ball in self.balls:
            for point, pixel in ball.pixels():
                pixels[point].append(ball)
                self.canvas[point] = pixel

        # A pixel owned by two or more balls is a collision: flash every
        # pixel of every colliding ball white...
        collided_balls = []
        for balls in pixels.values():
            if len(balls) > 1:
                collided_balls.extend(balls)
                for ball in balls:
                    for point, _ in ball.pixels():
                        self.canvas[point] = Color(0, 0, 1, 3500)

        # ...then retire the collided balls and spawn replacements.
        self.balls = [b for b in self.balls if b not in collided_balls]
        self.ensure_enough_balls()
        return self.canvas
class TileBallsAnimation(Animation):
    """Animation driver wiring TileBallsState into the Animation framework."""

    def setup(self):
        self.iteration = 0
        if self.options.random_orientations:
            # Propagate the option onto the attribute the base Animation
            # reads (only ever set to True here; the base default applies
            # otherwise).
            self.random_orientations = True
        normalise_speed_options(self.options)
        if not self.options.ball_hues:
            # Default: any hue on the colour wheel.
            self.options.ball_hues = [HueRange(0, 360)]

    def next_state(self, prev_state, coords):
        # First frame builds fresh state; later frames advance it in place.
        if prev_state is None:
            return TileBallsState(coords, self.options)

        self.iteration += 1
        if self.options.final_iteration(self.iteration):
            # Finish tells the framework to stop the animation cleanly.
            raise Finish("Reached max iterations")

        return prev_state.tick()

    def make_canvas(self, state, coords):
        return state.make_canvas()
| [
"stephen@delfick.com"
] | stephen@delfick.com |
1c4d18d97746bdc86efb298a2ae3adb8481ac0ef | 9b19c22ce89e2895fc16420fae7114879a3ed1dc | /models/network.py | 3b70a6aac438c1889f17906e40c69b17e99f74aa | [
"MIT"
] | permissive | TangLisan/pytorch-office_finetune | 3b5ad6a1ac25633e8b64ab0a8e316ddcb67add78 | bd953404660b5098103f583852395a1c98cc4ea5 | refs/heads/master | 2020-03-22T21:24:17.253843 | 2018-06-29T17:41:18 | 2018-06-29T17:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | import torch.nn as nn
from torchvision import models
from .alexnet import alexnet
class AlexModel(nn.Module):
    """AlexNet pretrained on ImageNet, with the final classifier layer
    replaced by a freshly initialised 31-way linear layer (31 = number of
    classes this head predicts)."""

    def __init__(self):
        super(AlexModel, self).__init__()
        self.restored = False  # flag: weights not yet restored from a snapshot
        model_alexnet = models.alexnet(pretrained=True)
        self.features = model_alexnet.features
        # Copy the first six classifier modules (everything before the
        # original 4096->1000 output layer)...
        self.fc = nn.Sequential()
        for i in range(6):
            self.fc.add_module("classifier" + str(i),
                               model_alexnet.classifier[i])
        self.__in_features = model_alexnet.classifier[6].in_features  # 4096
        # ...and append a fresh 4096->31 output layer in their place.
        self.fc.add_module("final", nn.Linear(4096, 31))

    def forward(self, input_data):
        # expand() only broadcasts size-1 dims, so this assumes inputs are
        # 224x224 with a channel dim of 1 (or already 3) -- TODO confirm
        # against the data loader.
        input_data = input_data.expand(input_data.data.shape[0], 3, 224, 224)
        feature = self.features(input_data)
        feature = feature.view(-1, 256*6*6)  # flatten the conv output
        class_output = self.fc(feature)
        return class_output
class AlexModel_LRN(nn.Module):
    """AlexNet variant built from the project-local ``alexnet`` (named for
    LRN; defined in .alexnet), pretrained on ImageNet.  Keeps the first six
    classifier modules as a feature head and adds a fresh 4096->31 output
    layer for 31 classes."""

    def __init__(self):
        super(AlexModel_LRN, self).__init__()
        self.restored = False  # flag: weights not yet restored from a snapshot
        model_alexnet = alexnet(pretrained=True)  # project-local variant
        self.features = model_alexnet.features
        # Copy the first six classifier modules (everything before the
        # original 4096->1000 output layer).
        self.fc = nn.Sequential()
        for i in range(6):
            self.fc.add_module("classifier" + str(i),
                               model_alexnet.classifier[i])
        self.__in_features = model_alexnet.classifier[6].in_features  # 4096
        # Unlike AlexModel, the new output layer lives in a separate
        # ``classifier`` attribute rather than being appended to ``fc``.
        self.classifier = nn.Sequential(
            nn.Linear(4096, 31),
        )

    def forward(self, input_data):
        # NOTE(review): this variant expects 227x227 inputs (AlexModel uses
        # 224); expand() only broadcasts size-1 dims -- confirm the loader
        # produces single-channel (or 3-channel) 227x227 batches.
        input_data = input_data.expand(input_data.data.shape[0], 3, 227, 227)
        feature = self.features(input_data)
        feature = feature.view(-1, 256*6*6)  # flatten the conv output
        fc = self.fc(feature)
        class_output = self.classifier(fc)
        return class_output
class ResModel(nn.Module):
    """ResNet-50 pretrained on ImageNet with a fresh 31-way linear head
    replacing the original fc layer."""

    def __init__(self):
        super(ResModel, self).__init__()
        self.restored = False  # flag: weights not yet restored from a snapshot
        model_resnet50 = models.resnet50(pretrained=True)
        # Reuse the whole convolutional trunk up to and including the
        # global average pool.
        self.features = nn.Sequential(
            model_resnet50.conv1,
            model_resnet50.bn1,
            model_resnet50.relu,
            model_resnet50.maxpool,
            model_resnet50.layer1,
            model_resnet50.layer2,
            model_resnet50.layer3,
            model_resnet50.layer4,
            model_resnet50.avgpool,
        )
        self.__in_features = model_resnet50.fc.in_features  # trunk feature width
        self.fc = nn.Linear(self.__in_features, 31)

    def forward(self, input):
        x = self.features(input)
        x = x.view(x.size(0), -1)  # flatten pooled features to (N, features)
        x = self.fc(x)
        return x
"wogong38@gmail.com"
] | wogong38@gmail.com |
1827ab09efa83cecd6af6f1223a86a09e3b46a54 | 594fd699d9f8070c867b83b11881ca1f624b417b | /EstruturaDeDecisao/decrescente.py | b7a6621da3be011d88c1fb924021f09189db1f3b | [] | no_license | felipmarqs/exerciciospythonbrasil | f140df2c59b933cc0460d5986afc8c6ddd493556 | 6d02e85ae5986d3b20cfd8781174998d871eeb90 | refs/heads/master | 2020-04-04T05:25:23.751175 | 2018-12-12T18:44:38 | 2018-12-12T18:44:38 | 155,745,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | #Faça um Programa que leia três números e mostre-os em ordem decrescente
# Read three numbers and print them in descending order.
n1 = int(input("Digite um número: "))
n2 = int(input("Digite um número: "))
n3 = int(input("Digite um número: "))

# BUG FIX: the original if/elif chain covered only the six strictly
# decreasing orderings, so any tie (e.g. 3 3 1) fell through to
# print('erro').  Sorting handles ties and all orderings uniformly and
# prints the same space-separated line as print(a, b, c) did.
print(*sorted((n1, n2, n3), reverse=True))
"noreply@github.com"
] | felipmarqs.noreply@github.com |
7bf13d83b6a5e244be420a98f6b63913b90ac084 | ef90992dc00640f42ec615075a9b030b771f81e4 | /Algorithm/linked_list.py | e544fd8b14fb1fc7c5a90bb51f3a36afef290f27 | [] | no_license | korea7030/pythonwork | 88f5e67b33e9143eb40f6c10311a29e08317b77e | 70741acb0477c9348ad3f1ea07a183dda82a5402 | refs/heads/master | 2023-01-08T01:47:15.141471 | 2020-09-09T13:28:20 | 2020-09-09T13:28:20 | 54,378,053 | 0 | 0 | null | 2022-12-26T20:25:43 | 2016-03-21T10:00:07 | Jupyter Notebook | UTF-8 | Python | false | false | 2,540 | py | class Node:
def __init__(self, item):
self.item = item # item
self.next = None # next
class LinkedList:
    """A minimal singly linked list with head insertion (push)."""

    def __init__(self):
        self.head = None  # points at the first node (None when empty)

    def push(self, item):
        """Insert item at the head of the list (O(1))."""
        new_node = Node(item)
        new_node.next = self.head
        self.head = new_node

    def next(self):
        """Return the head item, printing a message when the list is empty.

        NOTE(review): the empty path returns None implicitly instead of
        raising; callers must handle both outcomes.
        """
        if self.head == None:
            print("no next item")
        else:
            return self.head.item

    def printList(self):
        """Print every item on one line, head first."""
        current = self.head
        if (current is None):
            print("Not information")
            return
        while(current is not None):
            print(current.item, end=" ")
            current = current.next

    def reverse(self):
        """Reverse the list in place by re-pointing each node's next link."""
        prev = None
        current = self.head
        while(current is not None):
            next = current.next    # remember the rest of the chain
            current.next = prev    # flip this node's link backwards
            prev = current
            current = next
        self.head = prev

    def search(self, item):
        """Linear search; prints whether item was found (returns nothing)."""
        temp = self.head
        flag = False
        while temp is not None and flag is False:
            if(temp.item == item):
                flag = True
            else:
                temp = temp.next
        if flag:
            print("find", temp.item)
        else:
            print("Not find")

    def remove(self, item):
        """Unlink the first node whose payload equals item, if any."""
        prev = None
        current = self.head
        flag = False
        while current is not None and flag is False:
            if (current.item == item):
                flag = True
            else:
                prev = current
                current = current.next
        if current is None:
            print("not find")
        elif prev == None:  # the matched node is the head itself
            self.head = current.next
        else:  # splice the matched node out of the chain
            prev.next = current.next

    def get_last_n_node(self, n):
        """Return the item n positions before the last node (n=0 -> last).

        Two-pointer technique: advance temp2 n nodes ahead, then walk both
        until temp2 hits the tail.  NOTE(review): an empty list, or n
        exceeding the list length by 2 or more, can raise AttributeError
        before the None guard fires -- confirm callers stay in range.
        """
        temp1 = self.head
        temp2 = self.head
        if n != 0:
            for i in range(n):
                temp2 = temp2.next
            if temp2 is None:
                return None
        while temp2.next is not None:
            temp2 = temp2.next
            temp1 = temp1.next
        return temp1.item
if __name__ == "__main__":
    # Demo: push 1..4 (head insertion => list reads 4 3 2 1), print it,
    # reverse to 1 2 3 4, print again, then fetch a node counted from
    # the tail.
    linked_list = LinkedList()
    linked_list.push(1)
    linked_list.push(2)
    linked_list.push(3)
    linked_list.push(4)
    linked_list.printList()
    linked_list.reverse()
    linked_list.printList()
    a = linked_list.get_last_n_node(3)  # 3 nodes before the last -> 1 here
    # print(a)
    # print(linked_list.next())
| [
"korea7030@naver.com"
] | korea7030@naver.com |
07314e8b937bc00e5930881452ba7376f3dd6ff5 | 4dd6a8d8024a72a3e2d8e71e86fd34888a149902 | /dacon/dacon01/try_03-2.py | 6883e167ab7fdf28a59e3e0c854b426224425b60 | [] | no_license | KOOKDONGHUN/study | d483b125d349956b325bc5f4d99a4a95dd80ccbc | 517effbb19ddc820d53f0a6194463e7687467af6 | refs/heads/master | 2023-01-14T09:13:48.346502 | 2020-11-20T09:03:25 | 2020-11-20T09:03:25 | 259,818,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | from xgboost import XGBClassifier, plot_importance, XGBRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split, RandomizedSearchCV, KFold, cross_val_score,GridSearchCV
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
# Dacon comp1: predict the four targets (hhb, hbo2, ca, na) with XGBoost.
# hhb/hbo2 are fit on the full feature set; ca/na are fit on the top
# features chosen per-target via SelectFromModel.
train = np.load('./data/dacon/comp1/train.npy')
test = np.load('./data/dacon/comp1/pred.npy')

x = train[:, :-4]   # input features
y = train[:, -4:]   # targets, in name_ls order below
print(x)
print(y)

x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8,
                                                    random_state=0)

parameters = [{"n_estimators": [2000],
               "learning_rate": [0.01],
               "max_depth": [5],
               "colsample_bytree": [0.79],
               "colsample_bylevel": [0.79]}]

parameters2 = [{"n_estimators": [2000],
                "learning_rate": [0.01],
                "max_depth": [6],
                "colsample_bytree": [0.79],
                "colsample_bylevel": [0.79]}]

kfold = KFold(n_splits=4, shuffle=True, random_state=66)

model = XGBRegressor(n_jobs=6)
model2 = XGBRegressor(n_jobs=6)

name_ls = ['hhb', 'hbo2', 'ca', 'na']
tmp_dic = dict()

# Per-target feature selection for 'ca' (index 2) and 'na' (index 3).
# BUG FIX: both selectors were previously built (prefit=True) only after
# the model had been refit on y[:, 3], so the 'ca' selector silently used
# the 'na' feature importances.  Build each selector immediately after
# fitting on its own target.
model.fit(x_train, y_train[:, 2])
thresholds_2 = np.sort(model.feature_importances_)
selection_2 = SelectFromModel(model, threshold=thresholds_2[100], prefit=True)

model.fit(x_train, y_train[:, 3])
thresholds_3 = np.sort(model.feature_importances_)
selection_3 = SelectFromModel(model, threshold=thresholds_3[100], prefit=True)

selection_x_train_2 = selection_2.transform(x_train)
selection_x_train_3 = selection_3.transform(x_train)
selection_x_test_2 = selection_2.transform(x_test)
selection_x_test_3 = selection_3.transform(x_test)
test_2 = selection_2.transform(test)
test_3 = selection_3.transform(test)

model = GridSearchCV(model, parameters, cv=kfold)
model2 = GridSearchCV(model2, parameters2, cv=kfold)

# hhb (0) and hbo2 (1): fit on the full feature set.
for i in range(2):
    model.fit(x_train, y_train[:, i])
    y_test_pred = model.predict(x_test)
    r2 = r2_score(y_test[:, i], y_test_pred)
    print(f"r2 : {r2}")
    mae = mean_absolute_error(y_test[:, i], y_test_pred)
    print(f"mae : {mae}")
    tmp_dic[name_ls[i]] = model.predict(test)

# ca (index 2): selected features only.
# BUG FIX: predictions were previously stored under tmp_dic[name_ls[i]]
# with the stale loop index (i == 1, 'hbo2'), clobbering that column and
# leaving 'ca'/'na' as all-NaN columns in the submission.
model2.fit(selection_x_train_2, y_train[:, 2])
y_test_pred = model2.predict(selection_x_test_2)
r2 = r2_score(y_test[:, 2], y_test_pred)
print(f"r2 : {r2}")
mae = mean_absolute_error(y_test[:, 2], y_test_pred)
print(f"mae : {mae}")
tmp_dic[name_ls[2]] = model2.predict(test_2)

# na (index 3): selected features only.
model2.fit(selection_x_train_3, y_train[:, 3])
y_test_pred = model2.predict(selection_x_test_3)
r2 = r2_score(y_test[:, 3], y_test_pred)
print(f"r2 : {r2}")
mae = mean_absolute_error(y_test[:, 3], y_test_pred)
print(f"mae : {mae}")
tmp_dic[name_ls[3]] = model2.predict(test_3)

# Submission rows are ids 10000..19999.
df = pd.DataFrame(tmp_dic, range(10000, 20000), columns=['hhb', 'hbo2', 'ca', 'na'])
# print(df)
df.to_csv('./submission.csv',index_label='id') | [
"dh3978@naver.com"
] | dh3978@naver.com |
bab04255c775ca3e378bc7a8498fa5ff3a634212 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_jutting.py | 0acbaa3641341ca83124c6516a98b3157035b9b7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _JUTTING():
def __init__(self,):
self.name = "JUTTING"
self.definitions = jut
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['jut']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
16f97ac15b2d4fecf77b09d2857c71d479c0ad5f | 41cf1a4da983ba4673af58353813f77f229ce9c1 | /nbx/nbmanager/bundle/bundle.py | a28e5d2f0b4562f169f350332da24502007e8b39 | [
"MIT"
] | permissive | dalejung/nbx | d940d38084bc99e41e963d22ea43c1b43d7dffae | e5a9571bc5c91aeb279ff337383405a846bb6128 | refs/heads/master | 2023-07-08T03:22:39.939433 | 2023-06-27T00:32:35 | 2023-06-27T00:32:35 | 17,010,961 | 2 | 1 | null | 2017-04-03T03:39:36 | 2014-02-20T06:03:06 | Python | UTF-8 | Python | false | false | 2,457 | py | import os
import io
import nbformat
from IPython.utils import tz
class Bundle(object):
    """A directory treated as a single named artifact.

    The bundle is named after the final component of its path.
    """

    def __init__(self, path):
        self.path = path
        self.name = path.rsplit('/', 1)[-1]

    def __repr__(self):
        return "{0}(name={1}, path={2})".format(
            type(self).__name__, self.name, self.path)

    @property
    def files(self):
        """Names of the entries directly inside the bundle directory,
        excluding compiled ``.pyc`` artifacts.  Returns [] when the
        directory cannot be walked (e.g. it does not exist)."""
        try:
            _, _, entries = next(os.walk(self.path))
        except StopIteration:
            return []
        return [name for name in entries if not name.endswith('.pyc')]
class NotebookBundle(Bundle):
    """A Bundle whose primary payload is a notebook named after the
    bundle directory itself."""

    @property
    def notebook_content(self):
        """The parsed (v4) notebook, or None when it cannot be read."""
        notebook_path = os.path.join(self.path, self.name)
        with io.open(notebook_path, 'r', encoding='utf-8') as handle:
            try:
                return nbformat.read(handle, as_version=4)
            except Exception:
                # Best effort: an unreadable/corrupt notebook yields None.
                return None

    @property
    def files(self):
        """Support files in the bundle: everything except the notebook."""
        names = super(NotebookBundle, self).files
        assert self.name in names
        names.remove(self.name)
        assert self.name not in names
        return names

    def get_model(self, content=True, file_content=True):
        """Build a contents-API style model dict for this bundle.

        content: include the parsed notebook under 'content'.
        file_content: include each support file's text under '__files';
        when False the keys are present with None values.  Files that are
        not valid UTF-8 are skipped entirely.
        """
        notebook_path = os.path.join(self.path, self.name)
        stat = os.stat(notebook_path)

        model = {}
        model['name'] = self.name
        model['path'] = self.path
        model['last_modified'] = tz.utcfromtimestamp(stat.st_mtime)
        model['created'] = tz.utcfromtimestamp(stat.st_ctime)
        model['type'] = 'notebook'
        model['is_bundle'] = True
        model['content'] = self.notebook_content if content else None

        attachments = {}
        for filename in self.files:
            with open(os.path.join(self.path, filename), 'rb') as handle:
                text = None
                if file_content:
                    try:
                        text = handle.read().decode('utf-8')
                    except UnicodeDecodeError:
                        # TODO how to deal with binary data?
                        # right now we skip
                        continue
            attachments[filename] = text
        model['__files'] = attachments
        return model
| [
"dale@dalejung.com"
] | dale@dalejung.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.