blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fe5a15533c33ffc5a7c712c95bd101c436d9abeb | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_211/ch11_2020_03_05_22_17_54_209757.py | 0a1e69d816050755f964c115c59f668d1f8cb2ac | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import math
def distancia_euclidiana(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    Bug fixes: the module is imported as ``math`` (the original called
    ``mat.sqrt``, a NameError) and the result variable is ``dist`` (the
    original returned the undefined name ``dist_``).
    """
    dx = x2 - x1
    dy = y2 - y1
    dist = math.sqrt(dx ** 2 + dy ** 2)
    return dist
| [
"you@example.com"
] | you@example.com |
cfbbb16014049c04dd2b6265a388c8013940b803 | 6d8ebfaf95299fa7fa892db4565f3597a72f5219 | /rest_mongo/fileutils.py | 58273a1ebc278d89e96d80a745f932d772670277 | [] | no_license | videntity/georegistry | 30aecec862f10d364cb72ce391656dc8d2a0d794 | 44fcda20d669650d1efbfee4907986654fd6d931 | refs/heads/master | 2021-01-23T20:13:20.899937 | 2011-06-11T13:15:36 | 2011-06-11T13:15:36 | 1,880,133 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,346 | py | #!/usr/bin/env python
from django.conf import settings
import sys, os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import mimetypes
from datetime import datetime, timedelta
"""
simpleS3.py
"""
#
# simpleS3.py
#
# By: Alan Viars
# Copyright Videntity Systems, Inc. 2009
# All rights Reseved.
# License: New BSD
# Last Updated: May 9, 2009
#
# This was tested using Python 2.5 and Ubuntu Linux, but
# it should run fine w/ other configurations.
# You will need to install boto to get this library running
# and of course you need an S3 account from Amazon.
# See http://aws.amazon.com
#
# NOTES ON INSTALLING BOTO:
# 1.7.a is latestversion of boto at the time of writing.
# Execute the following from a command line prompt
# > wget http://boto.googlecode.com/files/boto-1.7a.tar.gz
# > tar zxvf boto-1.7a.tar.gz
# > cd boto-1.7a
# Run this as root or w/ admin privileges
# > python setup.py install
# > if on Ubuntu or Debian deravitive, use sudo like so:
# > sudo python setup.py install
#Set these to match your Amazon S3 Account
AWS_ACCESS_KEY= '*****PUT_YOUR_KEY_HERE****'
AWS_SECRET_ACCESS_KEY='*****PUT_YOUR_SECRET_KEY_HERE****'
class SimpleS3:
    """
    A very simple helper to store and retrieve files in Amazon S3.
    Works with HTTPS/port 443 only (no HTTP/port 80).
    """

    #Store a file in s3
    def store_in_s3 (self, bucket,
                     filename,
                     local_filepath,
                     public=False):
        """Upload local_filepath to S3 as `filename` inside `bucket`.

        Returns the object's HTTPS URL on success, or "" on any failure.
        Errors are deliberately swallowed (best-effort), matching the
        original behaviour; the original's `return url` inside `finally`
        additionally masked any exception and any earlier return, so it
        has been removed in favour of a single exit point.
        """
        url = ""
        try:
            conn = S3Connection(settings.AWS_KEY,
                                settings.AWS_SECRET)
            b = conn.create_bucket(bucket)
            k = Key(b)
            k.key = filename
            # Guess the MIME type from the file extension; fall back to a
            # generic binary type when no extension is recognised.
            mime = mimetypes.guess_type(filename)[0]
            if mime is None:
                mime = "application/octet-stream"
            k.set_metadata("Content-Type", mime)
            k.set_contents_from_filename(local_filepath)
            if public:
                k.set_acl("public-read")
            url = "https://%s.s3.amazonaws.com/%s" % (bucket, k.key)
        except Exception:
            # Best-effort: an empty URL signals failure to the caller.
            pass
        return url

    #Get a file from s3
    def get_from_s3 (self, bucket, filename, local_filepath ):
        """Download `filename` from `bucket` into local_filepath.

        Returns True on success, False on failure.

        Bug fix: the original definition omitted `self`, so calling it on
        a SimpleS3 instance shifted every argument by one (the instance
        became `bucket`).

        NOTE(review): this method authenticates with the module-level
        AWS_ACCESS_KEY constants while store_in_s3 uses django settings —
        presumably both should use the same source; confirm which is
        intended.
        """
        retval = False
        try:
            conn = S3Connection(AWS_ACCESS_KEY,
                                AWS_SECRET_ACCESS_KEY)
            b = conn.create_bucket(bucket)
            k = Key(b)
            k.key = filename
            k.get_contents_to_filename(local_filepath)
            retval = True
        except Exception:
            # Best-effort: False signals failure to the caller.
            pass
        return retval
# Our MAIN application which takes 3 command line arguments
# Take in a mode, bucketname, filename, and public T/F.
# if mode=PUT, then store the file in S3
# If mode=GET, then read the file from S3,
# and write it to local disk
def handle_uploaded_file(file, user, uuid):
    """Persist an uploaded file under MEDIA_ROOT/<username>/ and, depending
    on settings.BINARY_STORAGE, reference it locally or push it to S3.

    Args:
        file: Django uploaded-file object (read in chunks).
        user: request user; files are grouped under user.username.
        uuid: unique suffix appended to the stored file name.

    Returns:
        dict with keys 'localfilename' and 'urli'; an 'errors' key is
        added on failure.
    """
    responsedict = {'localfilename': None,
                    'urli': None
                    }

    # Per-user upload directory under MEDIA_ROOT.
    dirname = '%s/%s/' % (settings.MEDIA_ROOT, user.username)
    try:
        # Create the user's directory on first upload.
        if not os.path.isdir(dirname):
            os.mkdir(dirname)

        # Disambiguate the stored name with the caller-supplied uuid.
        # (Dead timestamp code computing an unused time string removed.)
        new_file_name = '%s_%s' % (file.name, uuid)
        full_path = '%s%s' % (dirname, new_file_name)

        # Write the upload in chunks; `with` guarantees the handle is
        # closed even if a chunk write fails (the original leaked it).
        with open(full_path, 'wb') as destination:
            for chunk in file.chunks():
                destination.write(chunk)

        # MEDIA_ROOT-relative name used as the local URI.
        file_name = "%s/%s" % (user.username, new_file_name)
    except Exception:
        responsedict['errors'] = "There was an error uploading your file."
        print(sys.exc_info())
        return responsedict

    if settings.BINARY_STORAGE == 'LOCAL':
        responsedict['localfilename'] = "file://%s" % (full_path)
        responsedict['urli'] = file_name
    elif settings.BINARY_STORAGE == 'AWSS3':
        s = SimpleS3()
        # Bug fix: upload the file we actually wrote. The original passed
        # the MEDIA_ROOT-relative name as the local path, which does not
        # exist relative to the working directory.
        responsedict['urli'] = s.store_in_s3(settings.AWS_BUCKET,
                                             new_file_name,
                                             full_path,
                                             settings.AWS_PUBLIC)
        if responsedict['urli'] == "":
            responsedict['errors'] = "AWS S3 file %s upload failed" % (new_file_name)
    return responsedict
"aviars@videntity.com"
] | aviars@videntity.com |
16a07c4a5130861ee929c4c6aa071b6b522282fa | ce29884aa23fbb74a779145046d3441c619b6a3c | /hot/101.py | 59780c0d19d68e9723dd52964b189a62795bf5f8 | [] | no_license | gebijiaxiaowang/leetcode | 6a4f1e3f5f25cc78a5880af52d62373f39a546e7 | 38eec6f07fdc16658372490cd8c68dcb3d88a77f | refs/heads/master | 2023-04-21T06:16:37.353787 | 2021-05-11T12:41:21 | 2021-05-11T12:41:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/11/18 21:40
# @Author : dly
# @File : 101.py
# @Desc :
# 对称二叉树
class Solution(object):
    def isSymmetric(self, root):
        """Return True if the binary tree rooted at `root` mirrors itself.

        :type root: TreeNode
        :rtype: bool
        """
        if root is None:
            return True

        def mirror(left, right):
            # Two empty subtrees mirror each other.
            if left is None and right is None:
                return True
            # Exactly one empty subtree, or differing values: no mirror.
            if left is None or right is None or left.val != right.val:
                return False
            # Outer children must match outer, inner must match inner.
            return mirror(left.left, right.right) and mirror(left.right, right.left)

        return mirror(root.left, root.right)
| [
"1083404373@qq.com"
] | 1083404373@qq.com |
1f778357bc0ffe65c05214c2bc7f0432cea408f5 | aaa204ad7f134b526593c785eaa739bff9fc4d2a | /tests/providers/amazon/aws/hooks/test_step_function.py | 54f9556b7016ac20701ad68f26a857dad7136944 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cfei18/incubator-airflow | 913b40efa3d9f1fdfc5e299ce2693492c9a92dd4 | ffb2078eb5546420864229cdc6ee361f89cab7bd | refs/heads/master | 2022-09-28T14:44:04.250367 | 2022-09-19T16:50:23 | 2022-09-19T16:50:23 | 88,665,367 | 0 | 1 | Apache-2.0 | 2021-02-05T16:29:42 | 2017-04-18T20:00:03 | Python | UTF-8 | Python | false | false | 2,550 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import unittest
from airflow.providers.amazon.aws.hooks.step_function import StepFunctionHook
try:
from moto import mock_stepfunctions
except ImportError:
mock_stepfunctions = None
@unittest.skipIf(mock_stepfunctions is None, 'moto package not present')
class TestStepFunctionHook(unittest.TestCase):
    """Exercise StepFunctionHook against a moto-mocked Step Functions API."""

    def _create_state_machine(self, hook):
        """Create a throwaway state machine and return its ARN."""
        created = hook.get_conn().create_state_machine(
            name='pseudo-state-machine', definition='{}', roleArn='arn:aws:iam::000000000000:role/Role'
        )
        return created.get('stateMachineArn')

    @mock_stepfunctions
    def test_get_conn_returns_a_boto3_connection(self):
        hook = StepFunctionHook(aws_conn_id='aws_default')
        assert hook.get_conn().meta.service_model.service_name == 'stepfunctions'

    @mock_stepfunctions
    def test_start_execution(self):
        hook = StepFunctionHook(aws_conn_id='aws_default', region_name='us-east-1')
        arn = self._create_state_machine(hook)
        execution_arn = hook.start_execution(state_machine_arn=arn, name=None, state_machine_input={})
        assert execution_arn is not None

    @mock_stepfunctions
    def test_describe_execution(self):
        hook = StepFunctionHook(aws_conn_id='aws_default', region_name='us-east-1')
        arn = self._create_state_machine(hook)
        execution_arn = hook.start_execution(state_machine_arn=arn, name=None, state_machine_input={})
        response = hook.describe_execution(execution_arn)
        assert 'input' in response
| [
"noreply@github.com"
] | cfei18.noreply@github.com |
67258a6994da1d273531135b1b83de48c41edf94 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/mingw64/mingw64_libcsv.py | fdb7c482927ad1b4dc16ce316999985addbca2f6 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from .base import Mingw64BaseRecipe
class Mingw64LibCsvRecipe(Mingw64BaseRecipe):
    """Build recipe for libcsv 3.0.3 cross-compiled with mingw64."""

    def __init__(self, *args, **kwargs):
        super(Mingw64LibCsvRecipe, self).__init__(*args, **kwargs)
        # Package identity.
        self.name = 'mingw64-libcsv'
        self.version = '3.0.3'
        # Source tarball location ($version is expanded by the base recipe)
        # and its expected checksum.
        self.url = ('http://downloads.sourceforge.net/project/libcsv/libcsv/'
                    'libcsv-$version/libcsv-$version.tar.gz')
        self.sha256 = ('d9c0431cb803ceb9896ce74f683e6e5a'
                       '0954e96ae1d9e4028d6e0f967bebd7e4')
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
09a753257b8c194d653adc57fa8fc15b1ef05a9a | a5fdc429f54a0deccfe8efd4b9f17dd44e4427b5 | /0x08-python-more_classes/4-rectangle.py | f4dd8584a36172af297d6445e87fa837e70d95f0 | [] | no_license | Jilroge7/holbertonschool-higher_level_programming | 19b7fcb4c69793a2714ad241e0cc4fc975d94694 | 743a352e42d447cd8e1b62d2533408c25003b078 | refs/heads/master | 2022-12-20T20:41:33.375351 | 2020-09-25T02:02:28 | 2020-09-25T02:02:28 | 259,471,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | #!/usr/bin/python3
"""Module for evolved rectangle"""
class Rectangle:
    """Rectangle defined by a validated width and height.

    Bug fix: the original ``__init__`` assigned the private attributes
    directly, bypassing the property setters, so invalid construction
    arguments (negative numbers, non-ints) were silently accepted.
    Construction now goes through the setters so validation always runs.
    """

    def __init__(self, width=0, height=0):
        """Initialize the rectangle, validating both dimensions.

        Raises:
            TypeError: if a dimension is not an integer.
            ValueError: if a dimension is negative.
        """
        self.width = width
        self.height = height

    @property
    def width(self):
        """Method to get value of width"""
        return self.__width

    @width.setter
    def width(self, value):
        """Method to set the value of width"""
        if not isinstance(value, int):
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """Method to get the value of height"""
        return self.__height

    @height.setter
    def height(self, value):
        """Method to set the value of height"""
        if not isinstance(value, int):
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Return the area of the rectangle."""
        return self.__width * self.__height

    def perimeter(self):
        """Return the perimeter; 0 when either dimension is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return 2 * (self.__width + self.__height)

    def __str__(self):
        """Return the rectangle drawn with '#' characters ('' if empty)."""
        if self.__width == 0 or self.__height == 0:
            return ""
        row = "#" * self.__width
        return "\n".join(row for _ in range(self.__height))

    def __repr__(self):
        """Return an eval()-able representation of the rectangle."""
        # The original round-tripped each value through eval(repr(...)),
        # which is a no-op for validated ints.
        return "Rectangle({}, {})".format(self.__width, self.__height)
| [
"1672@holbertonschool.com"
] | 1672@holbertonschool.com |
01c294996900995b8b6e38364a71528e9204bfe2 | 2b3e9b32a38f4992c529de56b4baa51e1a674c4e | /ccui/testexecution/templatetags/execution.py | 4e6b442e37794b16a9f4d7be4b72a9985bd786e8 | [] | no_license | camd/caseconductor-ui | 2c4f63fd6c20ee421012d8770b3b873c1b4f4232 | deb6b22ed417740bf947e86938710bd5fa2ee2e7 | refs/heads/master | 2021-01-18T05:36:22.647236 | 2011-10-10T14:48:29 | 2011-10-10T14:48:29 | 2,447,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | from django import template
from django.template.loader import render_to_string
from classytags.core import Tag, Options
from classytags.arguments import Argument
from ..models import TestCaseAssignmentList
register = template.Library()
class RunCase(Tag):
    """Template tag: ensure the user has an assignment for the included
    test case and render the result matching the given environments."""

    name = "run_case"
    options = Options(
        Argument("includedtestcase"),
        Argument("user"),
        Argument("environments")
        )

    def render_tag(self, context, includedtestcase, user, environments):
        existing = TestCaseAssignmentList.get(auth=user.auth).filter(
            testCaseVersion=includedtestcase.testCaseVersion.id,
            testRun=includedtestcase.testRun.id,
            tester=user.id)
        # Reuse the first existing assignment; otherwise create one now.
        if len(existing):
            assignment = existing[0]
        else:
            assignment = includedtestcase.assign(user, auth=user.auth)

        # @@@ need a better way to filter results by environment group
        result = next(
            (res for res in assignment.results
             if res.environments.match(environments)),
            None)
        if result is None:
            # @@@ no environment match - should never happen.
            return u""

        return render_to_string(
            "runtests/_run_case.html",
            {"case": assignment.testCase,
             "caseversion": assignment.testCaseVersion,
             "result": result,
             "open": False,
             })


register.tag(RunCase)
| [
"carl@oddbird.net"
] | carl@oddbird.net |
0fe941019348268ae95cac69f0823b8ce404d416 | 987ead1eb0877b9bdea16f3ee50bf19d5fe204bd | /DL/face_feature/faceFeature.py | a22d34eba15d893f8ddbe019281b82568ce7d04a | [] | no_license | ZHX1996/project | da62151e32254848a02292a2f9bdb1db17850d67 | 5a57be55cf173dde7e5a135a9cf1cfbc9a63a158 | refs/heads/master | 2021-07-15T11:36:02.412231 | 2020-05-15T08:51:34 | 2020-05-15T08:51:34 | 94,512,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py | from itertools import islice
import numpy as np
import io
def train_data():
    """Parse rows 2-3 of training.csv into label and pixel arrays.

    Each CSV row holds numeric label columns followed by one final column
    containing a space-separated pixel string.

    Returns:
        tuple: (labels, dtrain) as numpy arrays — one row per CSV record.

    Fixes: the file handle is now closed via ``with`` (the original
    leaked it) and the unused ``i = 0`` initialiser is removed.
    """
    dtrain, labels = [], []
    with io.open('training.csv', 'r', encoding='utf-8') as train_file:
        # islice(..., 1, 3): skip the header row, read only rows 2-3.
        for line in islice(train_file, 1, 3):
            cols = line.split(',')
            # All but the last column are float label values.
            labels.append([float(v) for v in cols[:-1]])
            # Last column: space-separated integer pixel values (split()
            # also absorbs the trailing newline).
            dtrain.append([int(v) for v in cols[-1].split()])
    return np.array(labels), np.array(dtrain)
# print(dtrain)
# print(label)
def test_data():
    """Parse rows 2-3 of test.csv and print the image ids and pixel lists.

    Note: unlike train_data this only prints; it returns nothing.

    Fix: the file handle is now closed via ``with`` (the original leaked
    it).
    """
    image_ids, images = [], []
    with io.open('test.csv', 'r', encoding='utf-8') as test_file:
        # islice(..., 1, 3): skip the header row, read only rows 2-3.
        for line in islice(test_file, 1, 3):
            # Strip the newline, then split id / pixel-string columns.
            record = line.split('\n')[0].split(',')
            image_ids.append(int(record[0]))
            # Second column: space-separated integer pixel values.
            images.append([int(v) for v in record[1].split(' ')])
    print(image_ids)
    print(images)
if __name__ == '__main__':
    # train_data()
    # Only the test-set parser runs when executed as a script.
    test_data()
"1365370292@qq.com"
] | 1365370292@qq.com |
f66d19147add74c58d7348ca20d812a10a1f7bf4 | d19bfba1c92a59a9d5d888e87db32a2cd1e7bd00 | /example.py | e405d017f7947ad196b0a5d5b3ecc3639cb05de9 | [] | no_license | codesharedot/Plot-no-111 | 25a3c6a2b7895efc9f327a1b0695749361e2d858 | e046244c102253f5e538edc892f3ac106b8eec61 | refs/heads/master | 2021-03-15T03:39:44.802296 | 2020-03-12T11:51:42 | 2020-03-12T11:51:42 | 246,821,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import numpy as np
import matplotlib.pyplot as plt
# Group labels for the x-axis.
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
# Bar heights (mean score per group).
m_means = [8,1,6,9,10]
# Error-bar lengths (standard deviation per group).
m_std = [2, 3, 4, 1, 2]

width = 0.35  # the width of the bars

fig, ax = plt.subplots()
# Bar chart with symmetric error bars taken from m_std.
ax.bar(labels, m_means, width, yerr=m_std, label='Data')
ax.set_ylabel('Scores')
ax.set_title('Visual')
ax.legend()
#plt.show()
# Save the figure to disk instead of opening an interactive window.
plt.savefig('chart.png')
"codeto@sent.com"
] | codeto@sent.com |
618f3a962bee7c18f831cf4dd618b01831a5b133 | 112bcac00e68ffeceeffec335a87411f141ad17f | /codes/ch4/gethostbyaddr-paranoid.py | 55dde1e691537b3e573ceba19a6590a3b79bc697 | [] | no_license | wwq0327/PyNP | a5a214fde76ef0701a8205a3509e762f47f8fc2c | 684d13533d7296116aa7a099347365ca69a72004 | refs/heads/master | 2021-01-15T17:45:23.434786 | 2011-09-23T16:41:18 | 2011-09-23T16:41:18 | 2,421,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gethostbyaddr-paranoid.py
~~~~~~~~~~~~~~~~~~~~
:date: 2011-09-22
:from: Python Network Programming
"""
import sys, socket
def getipaddrs(hostname):
    """Return the list of IP addresses that *hostname* resolves to."""
    infos = socket.getaddrinfo(hostname, None, 0, socket.SOCK_STREAM)
    addresses = []
    for info in infos:
        # Each entry is (family, type, proto, canonname, sockaddr);
        # sockaddr's first element is the IP address string.
        addresses.append(info[4][0])
    return addresses
def gethostname(ipaddr):
    """Return the primary host name for the given IP address."""
    # gethostbyaddr returns (hostname, aliaslist, ipaddrlist).
    hostname, _aliases, _addrs = socket.gethostbyaddr(ipaddr)
    return hostname
# Forward-confirmed reverse DNS check (Python 2 syntax): reverse-resolve
# the IP given on the command line, then forward-resolve the resulting
# name and require the original IP to appear among the answers.
try:
    hostname = gethostname(sys.argv[1])
    ipaddrs = getipaddrs(hostname)
except socket.herror, e:
    # No PTR record for the address; not necessarily an error.
    print "No host names available for %s; this may be normal." % sys.argv[1]
    sys.exit(0)
except socket.gaierror, e:
    # Reverse lookup worked, but the name does not forward-resolve.
    print "Got hostname %s, but it could not be forward-resolved: %s" % \
            (hostname, str(e))
    sys.exit(1)
if not sys.argv[1] in ipaddrs:
    # The name's address records do not include the original IP:
    # possibly a spoofed PTR record.
    print "Got hostname %s, but on forward lookup," % hostname
    print "original IP %s did not appear in IP address list." % sys.argv[1]
    sys.exit(1)
print "validated hostname:", hostname
| [
"wwq0327@gmail.com"
] | wwq0327@gmail.com |
c0fb6fdd9e20b83a6bea88c1db214ee925b3d934 | 27a2864de9876b53db02bcfd1df2342af193933a | /users/views.py | d49c14148e211a0e9975e507da46a9d553808334 | [] | no_license | VinneyJ/learning_tracker | 40d04c19800cfa62ed4789753441208bf5d92cf6 | 63b0b00e9d153d193c7f12f6af57f1b756d95c5d | refs/heads/master | 2022-08-28T07:50:47.700443 | 2020-05-29T07:22:08 | 2020-05-29T07:22:08 | 266,212,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from django.shortcuts import render, redirect
from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def register(request):
    """Handle user sign-up: blank form on GET, account creation on POST."""
    if request.method == 'POST':
        # Bind the submitted data; on success, log the new user in and
        # redirect to the home page.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            login(request, new_user)
            return redirect('learning_logs:index')
    else:
        form = UserCreationForm()
    # GET, or an invalid POST: render the (possibly error-annotated) form.
    return render(request, 'registration/register.html', {'form': form})
"vincentjayden49@gmail.com"
] | vincentjayden49@gmail.com |
87dc8f8a1fa3afbc82df866419a33d3ed8c8f399 | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_biopsy.py | dd6a45a631739b758f371642bb938df3da77b5b4 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.biopsy import biopsy
def test_biopsy():
  """Test module biopsy.py by downloading
  biopsy.csv and testing shape of
  extracted data has 699 rows and 11 columns

  Fixes: the original called ``raise()`` — which raises a TypeError and
  masks the real failure — instead of a bare ``raise``, and it leaked the
  temp directory on success; ``finally`` now cleans up on every path and
  lets the original exception propagate unchanged.
  """
  test_path = tempfile.mkdtemp()
  try:
    x_train, metadata = biopsy(test_path)
    assert x_train.shape == (699, 11)
  finally:
    shutil.rmtree(test_path)
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
33daf595f2ce39ec83394a7b12ab536c060a963d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/_exercises/_templates/PyQt5 Quick Start PyQt5 Database Operation/7 5. Paging Query 3. Paging Query Implementation.py | 1666eaaa01177d4de2dc0c3b30a26ae8e9ec6872 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 4,714 | py | # ______ ___
# ____ ?.?S.. ______ ?SD.., ?SQ.., ?STM.., QSqlQueryModel
# ____ ?.?C.. ______ *
# ____ ?.?W.. ______ *
# ______ re
#
# c_ DataGrid(?W..):
# ___ - parent_None):
# s__(DataGrid, self). - (parent)
# # Declare Database Connections
# db _ N..
# # Layout Manager
# layout _ ?VBL..
# # Query Model
# queryModel _ QSqlQueryModel()
# # Table View
# tableView _ ?TV..
# tableView.sM..(queryModel)
# #
# totalPageLabel _ ?L..
# currentPageLabel _ ?L..
# switchPageLineEdit _ ?LE..
# prevButton _ ?PB..("Prev")
# nextButton _ ?PB..("Next")
# switchPageButton _ ?PB..("Switch")
# # Current Page
# currentPage _ 1
# # PageCount
# totalPage _ N..
# # Total Records
# totalRecordCount _ N..
# # Number of records per page
# pageRecordCount _ 4
#
# initUI()
# initializedModel()
# setUpConnect()
# updateStatus()
#
# ___ initUI
# tableView.hH.. .setStretchLastSection( st.
# tableView.hH.. .sSRM..(?HV...Stretch)
# layout.aW..(tableView)
#
# hLayout _ ?HBL..
# hLayout.aW..(prevButton)
# hLayout.aW..(nextButton)
# hLayout.aW..(QLabel("Jump To"))
# switchPageLineEdit.setFixedWidth(40)
# hLayout.aW..(switchPageLineEdit)
# hLayout.aW..(QLabel("page"))
# hLayout.aW..(switchPageButton)
# hLayout.aW..(QLabel("Current page:"))
# hLayout.aW..(currentPageLabel)
# hLayout.aW..(QLabel("Total pages:"))
# hLayout.aW..(totalPageLabel)
# hLayout.addStretch(1)
#
# layout.aL..(hLayout)
# sL..(layout)
#
# sWT..("DataGrid")
# r..(600, 300)
#
# ___ setUpConnect
# prevButton.c__.c..(onPrevPage)
# nextButton.c__.c..(onNextPage)
# switchPageButton.c__.c..(onSwitchPage)
#
# ___ initializedModel
# db _ ?SD...aD..("QSQLITE")
# db.sDN..("/home/user/test.db")
# __ no. db.o..
# r_ F..
# queryModel.setHeaderData(0, __.H.., "ID")
# queryModel.setHeaderData(1, __.H.., "Name")
# queryModel.setHeaderData(2, __.H.., "Sex")
# queryModel.setHeaderData(3, __.H.., "Age")
# # Get all the records of the table
# sql _ "SELECT * FROM student"
# queryModel.setQuery(sql, db)
# totalRecordCount _ queryModel.rowCount()
# __ totalRecordCount % pageRecordCount __ 0:
# totalPage _ totalRecordCount / pageRecordCount
# ____
# totalPage _ int(totalRecordCount / pageRecordCount) + 1
# # Show Page 1
# sql _ "SELECT * FROM student limit %d,%d" % (0, pageRecordCount)
# queryModel.setQuery(sql, db)
#
# ___ onPrevPage
# currentPage -_ 1
# limitIndex _ (currentPage - 1) * pageRecordCount
# queryRecord(limitIndex)
# updateStatus()
#
# ___ onNextPage
# currentPage +_ 1
# limitIndex _ (currentPage - 1) * pageRecordCount
# queryRecord(limitIndex)
# updateStatus()
#
# ___ onSwitchPage
# szText _ switchPageLineEdit.t__()
# pattern _ re.compile('^[0-9]+$')
# match _ pattern.match(szText)
# __ no. match:
# ?MB...information "Tips", "please enter a number.")
# r_
# __ szText __ "":
# ?MB...information "Tips", "Please enter a jump page.")
# r_
# pageIndex _ int(szText)
# __ pageIndex > totalPage or pageIndex < 1:
# ?MB...information "Tips", "No page specified, re-enter.")
# r_
#
# limitIndex _ (pageIndex - 1) * pageRecordCount
# queryRecord(limitIndex)
# currentPage _ pageIndex
# updateStatus()
#
# # Query records based on paging
# ___ queryRecord limitIndex):
# sql _ "SELECT * FROM student limit %d,%d" % (limitIndex, pageRecordCount)
# queryModel.setQuery(sql)
#
# # Update Spatial Status
# ___ updateStatus
# currentPageLabel.sT..(st.(currentPage))
# totalPageLabel.sT..(st.(totalPage))
# __ currentPage <_ 1:
# prevButton.sE.. F..
# ____
# prevButton.sE..( st.
#
# __ currentPage >_ totalPage:
# nextButton.sE.. F..
# ____
# nextButton.sE..( st.
#
# # Close database connection when interface is closed
# ___ closeEvent event):
# db.c..
#
# __ __name__ __ "__main__":
# app _ ?A..(___.a..
# window _ DataGrid()
# window.s..
# ___.e.. ?.e.. | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
ddc1653e2d5e0653376da3e54a9f3d2962cead2d | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/create_datakey_without_plaintext_request.py | 2f468730dd17b485d1b5a2c0f176dd90c739fe61 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | # coding: utf-8
import pprint
import re
import six
class CreateDatakeyWithoutPlaintextRequest:
    """Auto-generated OpenAPI request model for the KMS
    "create data key without plaintext" operation.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []
    openapi_types = {
        'version_id': 'str',
        'body': 'CreateDatakeyRequestBody'
    }
    attribute_map = {
        'version_id': 'version_id',
        'body': 'body'
    }
    def __init__(self, version_id=None, body=None):
        """CreateDatakeyWithoutPlaintextRequest - a model defined in huaweicloud sdk

        :param version_id: API version identifier (path parameter).
        :param body: request payload; only assigned when not None so the
            backing field stays None for empty requests.
        """
        self._version_id = None
        self._body = None
        self.discriminator = None
        self.version_id = version_id
        if body is not None:
            self.body = body
    @property
    def version_id(self):
        """Gets the version_id of this CreateDatakeyWithoutPlaintextRequest.
        :return: The version_id of this CreateDatakeyWithoutPlaintextRequest.
        :rtype: str
        """
        return self._version_id
    @version_id.setter
    def version_id(self, version_id):
        """Sets the version_id of this CreateDatakeyWithoutPlaintextRequest.
        :param version_id: The version_id of this CreateDatakeyWithoutPlaintextRequest.
        :type: str
        """
        self._version_id = version_id
    @property
    def body(self):
        """Gets the body of this CreateDatakeyWithoutPlaintextRequest.
        :return: The body of this CreateDatakeyWithoutPlaintextRequest.
        :rtype: CreateDatakeyRequestBody
        """
        return self._body
    @body.setter
    def body(self, body):
        """Sets the body of this CreateDatakeyWithoutPlaintextRequest.
        :param body: The body of this CreateDatakeyWithoutPlaintextRequest.
        :type: CreateDatakeyRequestBody
        """
        self._body = body
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing to_dict) are serialised
        recursively, inside lists and dict values as well; attributes
        listed in sensitive_list are masked.
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into model elements; pass plain values through.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into model-typed dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Attribute-wise comparison via the instance __dict__.
        if not isinstance(other, CreateDatakeyWithoutPlaintextRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
7e1e14422e5c20ca970f9ac3f337d8f70a1365ef | 99985209fb8fa250886db43ee8c4bd3de9ec4ae6 | /Iris_flower_predict/iris.py | 6b6d64458aedccec48cfa916357bf540cc5498f8 | [] | no_license | Arpankarar/mini-data_science-projects | c7f986eacfb0901187981cbe29b978b38a3dddac | 1721226f505a6e41d3588ecc9cf57c1171c7f776 | refs/heads/master | 2023-06-24T02:01:28.406361 | 2021-07-31T08:58:48 | 2021-07-31T08:58:48 | 390,634,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import streamlit as st
import pandas as pd
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
st.write("""
# Simple Iris Flower Prediction App
This app predicts the **Iris flower** type!
""")
st.sidebar.header('User Input Parameters')
def user_input_features():
    """Collect the four iris measurements from sidebar sliders.

    Returns a single-row DataFrame suitable for classifier prediction.
    """
    # Slider bounds are the min/max of each feature in the iris dataset;
    # the third argument is the default position.
    values = {
        'sepal_length': st.sidebar.slider('Sepal length', 4.3, 7.9, 5.4),
        'sepal_width': st.sidebar.slider('Sepal width', 2.0, 4.4, 3.4),
        'petal_length': st.sidebar.slider('Petal length', 1.0, 6.9, 1.3),
        'petal_width': st.sidebar.slider('Petal width', 0.1, 2.5, 0.2),
    }
    return pd.DataFrame(values, index=[0])
df = user_input_features()
# Echo the chosen slider values back to the page.
st.subheader('User Input parameters')
st.write(df)
# Fit a random-forest classifier on the full iris dataset (re-trained on
# every Streamlit rerun).
iris = datasets.load_iris()
X = iris.data
Y = iris.target
clf = RandomForestClassifier()
clf.fit(X, Y)
# Predict the class and per-class probabilities for the user's input row.
prediction = clf.predict(df)
prediction_proba = clf.predict_proba(df)
st.subheader('Class labels and their corresponding index number')
st.write(iris.target_names)
st.subheader('Prediction')
st.write(iris.target_names[prediction])
st.subheader('Prediction Probability')
st.write(prediction_proba)
"you@example.com"
] | you@example.com |
01e44159ebc667da0d152037de82f1570e56a198 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02796/s829176641.py | db5435558a236408a2920925ea8b5565137ac7e4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | _, *_XL = open(0).read().split()
XL = list(zip(*[map(int, iter(_XL))]*2))
ans = 0
t = -10**10 # 数直線のマイナス方向からスタートする
arms = [(X-L, X+L) for X, L in XL]
arms_sorted = sorted(arms, key=lambda k:k[1])
ans = 0
for s, e in arms_sorted:
if t <= s:
t = e
ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f605fca32647d4acd9406b4bfc685b273229d17d | 640729166526060d5761458745a74a02b6fa4213 | /notebooks/solutions/day4-01-02.py | a893dd5268ede5252fcd6ca9cc82e21c705788dc | [] | no_license | AI-BlackBelt/yellow | 9dbb8b973a39d29ee071fb46fa164127836e0dbb | 560dba2268fa8dd4e4fc327bfe009b79784022ab | refs/heads/master | 2020-04-21T12:25:49.082139 | 2020-03-23T15:47:56 | 2020-03-23T15:47:56 | 169,561,717 | 8 | 3 | null | 2019-05-27T15:53:20 | 2019-02-07T11:32:49 | Jupyter Notebook | UTF-8 | Python | false | false | 150 | py | knn = KNeighborsClassifier(n_neighbors=best_n_neighbors)
knn.fit(X_train, y_train)
print("test-set score: {:.3f}".format(knn.score(X_test, y_test)))
| [
"g.louppe@gmail.com"
] | g.louppe@gmail.com |
36a49e6a2e38193458fb28a4d0df0bb692bf122d | dd5b7241ae3deed66254466d6e089cbb15ff0623 | /build/driver/depth_camera/iai_kinect2-master/kinect2_bridge/catkin_generated/pkg.installspace.context.pc.py | da100c56a86553c3e2284bf0a413954d9dea2c01 | [
"BSD-2-Clause"
] | permissive | lty1994/ros_project | 189dde5842a5bcb9392a70383a37822ccafb7de2 | d55ce07c592d545f9a43330fa6bf96af6651575f | refs/heads/master | 2020-04-14T16:14:22.878838 | 2019-01-04T05:31:46 | 2019-01-04T05:31:46 | 163,946,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
# Install-space include directories exported by this package.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/autolabor/catkin_ws/install/include".split(';') if "/home/autolabor/catkin_ws/install/include" != "" else []
# Catkin packages this package depends on (semicolons become spaces).
PROJECT_CATKIN_DEPENDS = "kinect2_registration".replace(';', ' ')
# Libraries to link against (none exported here).
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kinect2_bridge"
PROJECT_SPACE_DIR = "/home/autolabor/catkin_ws/install"
PROJECT_VERSION = "0.0.1"
| [
"lty2008@vip.qq.com"
] | lty2008@vip.qq.com |
92ef1e32731390c9733ad4e327465dc20028c848 | 6be845bf70a8efaf390da28c811c52b35bf9e475 | /windows/Resources/Python/Core/Lib/encodings/utf_32.py | ba16204eb2a42eb85e80a813dddfdc8cb0d0d369 | [] | no_license | kyeremalprime/ms | 228194910bf2ed314d0492bc423cc687144bb459 | 47eea098ec735b2173ff0d4e5c493cb8f04e705d | refs/heads/master | 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,658 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: utf_32.py
"""
Python 'utf-32' Codec
"""
import codecs
import sys
encode = codecs.utf_32_encode
def decode(input, errors='strict'):
    """Decode *input* as UTF-32 in one shot, consuming the whole buffer.

    Returns a ``(text, bytes_consumed)`` pair; a leading BOM selects the
    byte order and is stripped by the underlying codec.
    """
    output, consumed = codecs.utf_32_decode(input, errors, True)
    return (output, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Stateful UTF-32 encoder.

    The first chunk is encoded with codecs.utf_32_encode, which emits a
    BOM in native byte order; afterwards self.encoder is pinned to the
    matching BOM-less native-endian encoder so the BOM appears only once.
    """
    def __init__(self, errors='strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.encoder = None  # None means "BOM not yet written"
        return
    def encode(self, input, final=False):
        if self.encoder is None:
            # First call: emit BOM + data, then remember the native encoder.
            result = codecs.utf_32_encode(input, self.errors)[0]
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, self.errors)[0]
    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.encoder = None
        return
    def getstate(self):
        # State 2 = BOM still pending, 0 = BOM already written.
        if self.encoder is None:
            return 2
        else:
            return 0
    def setstate(self, state):
        # Non-zero state restores the "BOM pending" condition.
        if state:
            self.encoder = None
        elif sys.byteorder == 'little':
            self.encoder = codecs.utf_32_le_encode
        else:
            self.encoder = codecs.utf_32_be_encode
        return
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Stateful UTF-32 decoder.

    Byte order is detected from the BOM in the first complete 4-byte
    unit; self.decoder is then pinned to the matching endian-specific
    decoder for the remainder of the stream.
    """
    def __init__(self, errors='strict'):
        codecs.BufferedIncrementalDecoder.__init__(self, errors)
        self.decoder = None  # None until the BOM has identified the byte order
        return
    def _buffer_decode(self, input, errors, final):
        if self.decoder is None:
            # utf_32_ex_decode reports the byte order it detected:
            # -1 = little endian, +1 = big endian, 0 = undecided.
            output, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, final)
            if byteorder == -1:
                self.decoder = codecs.utf_32_le_decode
            elif byteorder == 1:
                self.decoder = codecs.utf_32_be_decode
            elif consumed >= 4:
                # A full code unit was consumed and still no BOM: invalid stream.
                raise UnicodeError('UTF-32 stream does not start with BOM')
            return (output, consumed)
        else:
            return self.decoder(input, self.errors, final)
    def reset(self):
        codecs.BufferedIncrementalDecoder.reset(self)
        self.decoder = None
        return
    def getstate(self):
        # Second state element: 2 = still expecting a BOM,
        # 0 = native byte order, 1 = non-native byte order.
        state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
        if self.decoder is None:
            return (state, 2)
        else:
            addstate = int((sys.byteorder == 'big') != (self.decoder is codecs.utf_32_be_decode))
            return (
             state, addstate)
    def setstate(self, state):
        codecs.BufferedIncrementalDecoder.setstate(self, state)
        state = state[1]
        if state == 0:
            self.decoder = codecs.utf_32_be_decode if sys.byteorder == 'big' else codecs.utf_32_le_decode
        elif state == 1:
            self.decoder = codecs.utf_32_le_decode if sys.byteorder == 'big' else codecs.utf_32_be_decode
        else:
            self.decoder = None
        return
class StreamWriter(codecs.StreamWriter):
    """Stream-based UTF-32 writer.

    Mirrors IncrementalEncoder: the first encode() writes a BOM via
    codecs.utf_32_encode, then switches to the BOM-less native-endian
    encoder for all subsequent writes.
    """
    def __init__(self, stream, errors='strict'):
        self.encoder = None  # None means "BOM not yet written"
        codecs.StreamWriter.__init__(self, stream, errors)
        return
    def reset(self):
        codecs.StreamWriter.reset(self)
        self.encoder = None
        return
    def encode(self, input, errors='strict'):
        if self.encoder is None:
            # First call: emit BOM + data, then remember the native encoder.
            result = codecs.utf_32_encode(input, errors)
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, errors)
        return
class StreamReader(codecs.StreamReader):
    """Stream-based UTF-32 reader.

    The instance's decode attribute starts as the BOM-sniffing method
    below and is replaced with an endian-specific codec function once
    the BOM has been seen; reset() restores the sniffing behaviour.
    """
    def reset(self):
        codecs.StreamReader.reset(self)
        try:
            # Drop the per-instance override so the class-level decode
            # (BOM detection) is used again.
            del self.decode
        except AttributeError:
            pass
    def decode(self, input, errors='strict'):
        object, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, False)
        if byteorder == -1:
            self.decode = codecs.utf_32_le_decode
        elif byteorder == 1:
            self.decode = codecs.utf_32_be_decode
        elif consumed >= 4:
            # Python 2 raise syntax -- this file is decompiled 2.7 bytecode.
            raise UnicodeError, 'UTF-32 stream does not start with BOM'
        return (object, consumed)
def getregentry():
    """Return the CodecInfo record that registers the 'utf-32' codec."""
    return codecs.CodecInfo(
        name='utf-32',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter)
"kyeremalprime@gmail.com"
] | kyeremalprime@gmail.com |
8a8938f84a7c687b66e0d1b18d01c112f7864e2f | 32fb5fbaf49cf767f276fc6cee0c31d2f63de159 | /node_modules/secp256k1/build/config.gypi | 3a1a2f612a9e9bb9a943190cc0e0235962abe708 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | laddukavala/ladduBabu | 9d7b771516e78a3091da44ff89b01a2aaeb50eb6 | a47238f8db435fc2561bd4b994caaa2e5e36c555 | refs/heads/master | 2022-12-14T20:11:30.634919 | 2019-06-01T06:29:45 | 2019-06-01T06:29:45 | 189,697,323 | 0 | 0 | null | 2022-12-09T04:46:01 | 2019-06-01T06:11:05 | JavaScript | UTF-8 | Python | false | false | 4,933 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 59,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/9.3.0_1",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "59.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "9.0",
"nodedir": "/Users/amareshjana/.node-gyp/9.3.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"viewer": "man",
"commit_hooks": "true",
"browser": "",
"only": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"git_tag_version": "true",
"cert": "",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"message": "%s",
"key": "",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"logs_max": "10",
"always_auth": "",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"searchlimit": "20",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"userconfig": "/Users/amareshjana/.npmrc",
"init_module": "/Users/amareshjana/.npm-init.js",
"cidr": "",
"node_version": "9.3.0",
"user": "",
"save": "true",
"editor": "vi",
"auth_type": "legacy",
"ignore_prepublish": "",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/amareshjana/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/5.6.0 node/v9.3.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"umask": "0022",
"init_version": "1.0.0",
"node_options": "",
"scope": "",
"git": "git",
"init_author_name": "",
"unsafe_perm": "true",
"tmp": "/var/folders/78/030c1h4n5j9c7pz7r80xw0x80000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
6b9226b90a7de587d71b30cad44ecff48e5b36f2 | 69096ca0d67b3d6809a2fe05af51341df62ebc60 | /tibiapy/errors.py | 7d46deea3b3e3d421b16d9b41493ecba7f3cf1fd | [
"Apache-2.0"
] | permissive | Galarzaa90/tibia.py | 6f648aff8b6fbac7be4886435711f7ff08420402 | f8c145dd597c558398bac50e035711e34863b571 | refs/heads/main | 2023-08-17T15:50:31.354488 | 2023-08-17T14:00:07 | 2023-08-17T14:00:07 | 143,892,750 | 30 | 12 | Apache-2.0 | 2023-08-24T17:24:19 | 2018-08-07T15:25:23 | Python | UTF-8 | Python | false | false | 2,521 | py | """Exceptions thrown by tibia.py."""
from enum import Enum
from typing import Type, Any
class TibiapyException(Exception):
    """Common ancestor of every exception raised by the tibiapy module.

    Catching this single type is enough to handle anything the library
    throws.
    """
class InvalidContent(TibiapyException):
    """Raised when the provided content does not belong to the expected section.

    This distinguishes "wrong page entirely" (a different website or a
    different part of Tibia.com) from a parse that simply found nothing
    (e.g. a character that does not exist).  It can also mean that
    Tibia.com changed its markup and the parser needs updating.

    Attributes
    ----------
    original: :class:`Exception`
        The underlying exception that triggered this one, if any.
    """

    def __init__(self, message, original=None):
        self.original = original
        super().__init__(message)
class NetworkError(TibiapyException):
    """Raised when fetching a resource from the web fails.

    Attributes
    ----------
    original: :class:`Exception`
        The underlying exception that caused this error, if any.
    fetching_time: :class:`float`
        Time elapsed between the request and the response.
    """

    def __init__(self, message, original=None, fetching_time=0):
        self.original = original
        self.fetching_time = fetching_time
        super().__init__(message)
class Forbidden(NetworkError):
    """Network error for HTTP 403 responses from Tibia.com.

    Tibia.com answers with a 403 status code when it detects too many
    requests from a client, so callers may want to back off rather than
    treat this like an ordinary network failure.
    """
class SiteMaintenanceError(NetworkError):
    """Network error raised while Tibia.com is down for maintenance.

    During maintenance every section of the website redirects to
    maintenance.tibia.com, so no content can be fetched.
    """
class EnumValueError(ValueError):
    """Raised when a value cannot be mapped to any member of an enum."""

    def __init__(self, enum: Type[Enum], value: Any) -> None:
        self.enum = enum
        message = (f"{value!r} is not a valid value for {enum.__name__}. "
                   f"Expected names ({self.names}) or values ({self.values})")
        super().__init__(message)

    @property
    def names(self):
        """Comma-separated names of the enum's members."""
        return ", ".join(member.name for member in self.enum)

    @property
    def values(self):
        """Comma-separated values of the enum's members."""
        return ", ".join(str(member.value) for member in self.enum)
| [
"allan.galarza@gmail.com"
] | allan.galarza@gmail.com |
6311dfd903716c2ca23ce2202aa867bc5a567a78 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/1755.py | 8c9f6f86daffb3dad052205a0bf1c9fee9d771c6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,502 | py | import sys
def winner_of_line(cells):
    """Return 'X' or 'O' if the four cells form a winning line, else None.

    A player wins a line with four of their own symbols, or with three of
    them plus the single 'T' wildcard (same rule the original applied to
    every diagonal/row/column copy-paste).
    """
    for player in ('X', 'O'):
        hits = cells.count(player)
        if hits == 4 or (hits == 3 and 'T' in cells):
            return player
    return None


def board_state(matrix):
    """Classify a 4x4 board given as a list of four 4-character rows/lists.

    Returns one of 'X won', 'O won', 'Draw' or 'Game has not completed'.
    Lines are examined in the same priority order as the original code:
    both diagonals first, then row r followed by column r for r = 0..3.
    """
    lines = [
        [matrix[i][i] for i in range(4)],      # main diagonal
        [matrix[i][3 - i] for i in range(4)],  # anti-diagonal
    ]
    for r in range(4):
        lines.append(list(matrix[r]))                    # row r
        lines.append([matrix[c][r] for c in range(4)])   # column r
    for line in lines:
        winner = winner_of_line(line)
        if winner is not None:
            return '%s won' % winner
    # No winner: the game is finished only when no empty cell remains.
    if any('.' in row for row in matrix):
        return 'Game has not completed'
    return 'Draw'


def solve(in_path, out_path):
    """Read the Code Jam input file and write one verdict line per case."""
    # 'with' guarantees both handles are closed (the original leaked them).
    with open(in_path, 'r') as in_file, open(out_path, 'w') as out_file:
        cases = int(in_file.readline().rstrip("\n"))
        for case in range(1, cases + 1):
            matrix = [list(in_file.readline().rstrip("\n")) for _ in range(4)]
            in_file.readline()  # consume the blank separator line
            out_file.write("Case #%d: %s\n" % (case, board_state(matrix)))


if __name__ == '__main__':
    # Same CLI contract as before: input file as argv[1], output written
    # next to it with the trailing "in" of the extension replaced by "out".
    solve(sys.argv[1], sys.argv[1][:-2] + "out")
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
3405e2abb735e5a7131c3bcfa7208aef3d54ecfc | 5a56592fb347f650cd3c7ada273bf58902c8c925 | /05_proteins_to_graph.py | c61886adf64c7a28c39d8467c3f76d6333e2bdc4 | [] | no_license | chelseaju/ProteinGraph | 05a1541719442966a76a9f8e11bc2552f41ada75 | 75a96a4eab4a7b59b18be4db209c855f2912ab1a | refs/heads/master | 2016-09-08T10:26:50.338110 | 2016-01-27T00:58:36 | 2016-01-27T00:58:36 | 30,887,654 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | """
Retrieve proteins from a given family
Convert these proteins into graph format
"""
import argparse, re, os, time, random
from pdb import *
from graph import *
from echo import *
POSITIVE = []
def build_pfam_reference(pfam_ref, pfam_id):
    """Collect 'PDBID_CHAIN' names whose Pfam family matches *pfam_id*.

    *pfam_ref* is a tab-separated association file whose columns include
    PDB id (0), chain (1) and Pfam accession (4); a trailing ".N"
    version suffix on the accession is stripped before comparison.
    Matching names are appended to the module-level POSITIVE list.
    """
    # 'with' closes the handle even on error (the original leaked it
    # whenever an exception escaped the loop).
    with open(pfam_ref, 'r') as fh:
        for line in fh:
            data = line.split("\t")
            pdb_id = data[0]
            chain = data[1]
            pfam = data[4]
            if pdb_id == "PDB_ID":
                continue  # skip the header row
            pfam_match = re.match(r'(.*)\.(\d+)', pfam)
            if pfam_match:
                pfam = pfam_match.group(1)
            if pfam == pfam_id:
                POSITIVE.append(pdb_id + "_" + chain)
    echo("Building Pfam Reference")
# positive graphs
def retrieve_graph(pfam_id, edge_info, dir):
    """Generate graph files for every PDB chain collected in POSITIVE.

    For each 'PDBID_CHAIN' entry the PDB data is parsed, pairwise
    distances are written to <dir><chain>.dist and the structure is
    converted to a graph in <dir><chain>.txt.  Returns the number of
    chains successfully processed.
    """
    positive_count = 0
    positive_candidates = set(POSITIVE)
    # select_candidates = random.sample(positive_candidates, 11)
    for pdb_chain in sorted(positive_candidates):
        (pdb, chain) = pdb_chain.split('_')
        # distance file name
        pdb_dst_file = dir + pdb_chain + ".dist"
        # graph file name
        pdb_graph_file = dir + pdb_chain + ".txt"
        # parse pdb data
        pdb_info = parse_pdb_by_id(pdb, chain)
        if(pdb_info):
            # comput distance
            pw_dist = pairwise_distance(pdb_info, pdb_dst_file)
            # convert structure to graph
            title = pfam_id+ " " + pdb_chain
            pdb_to_graph(pw_dist, pdb_graph_file, edge_info, positive_count, title)
            positive_count = positive_count + 1
            if(positive_count % 100 == 0):
                # Pause every 100 structures -- presumably to throttle the
                # remote PDB lookups; TODO confirm.
                time.sleep(3)
    return positive_count
def main(parser):
    """Drive the pipeline: parse CLI options, build references, emit graphs.

    Creates <dir>/<family_id>/ and fills it with .dist/.txt files for
    each matching protein; the directory is deleted again when fewer
    than 10 proteins were found.
    """
    options = parser.parse_args()
    edge_ref = options.eref
    fam_ref = options.fref
    fam_id = options.fam
    ftype = options.ftype
    dir = options.dir
    count = options.count
    if(dir[-1] != "/"):
        dir += "/"
    # create directory for selected pfam
    fam_dir = dir + fam_id + "/"
    os.system("mkdir -p %s " %(fam_dir))
    # build references
    edge_info = build_edge_guideline(edge_ref)
    if(ftype == "pfam"):
        build_pfam_reference(fam_ref, fam_id)
    elif(ftype == "scop"):
        # NOTE(review): build_scop_reference is not defined in this file;
        # presumably provided by one of the star imports -- verify.
        build_scop_reference(fam_ref, fam_id)
    num_protein = retrieve_graph(fam_id, edge_info, fam_dir)
    if(count and int(count) != num_protein):
        echo("Warning! %s contains %s proteins, but only has %d proteins" %(fam_id, count, num_protein))
    if(num_protein < 10):
        os.system("rm -rf %s " %(fam_dir))
        echo("%s does not have more than 10 proteins" %(fam_id))
    else:
        echo("Retrieving %d proteins for %s" %(num_protein, fam_id))
if __name__ == "__main__":
    # CLI: edge guideline file, family association file, family id,
    # family type (pfam|scop), output directory and an optional expected count.
    parser = argparse.ArgumentParser(prog='05_proteins_to_graph.py')
    parser.add_argument("-e", "--edge_reference", dest = "eref", type=str, help="edge reference file", required = True)
    parser.add_argument("-r", "--family_reference", dest = "fref", type=str, help="pfam or scop association file", required = True)
    parser.add_argument("-f", "--family_id", dest = "fam", type=str, help="family id", required = True)
    parser.add_argument("-t", "--family_type", dest = "ftype", type=str, help="pfam or scop", required = True)
    parser.add_argument("-d", "--directory", dest = "dir", type=str, help="directory for output", required = True)
    parser.add_argument("-c", "--count", dest = "count", type=str, help="number of protein", required = False)
    main(parser)
| [
"chelseaju@ucla.edu"
] | chelseaju@ucla.edu |
3746a660384a515254a4dccf0078e0b6055607fc | 48e294e8cec03960710b10fe6774b8e7145f4d50 | /documentation/environment_canada/ec_density.py | 09567cbf9b16c4a11ab57d39bc738bcf1d7d6a66 | [
"Unlicense"
] | permissive | NOAA-ORR-ERD/OilLibrary | 891744dc9877c40fe55a0f38712d9cc4d3fd12fc | a09a51721e43b040d19549531c0370efc956ebd0 | refs/heads/master | 2021-12-23T16:00:49.425884 | 2021-12-01T22:33:11 | 2021-12-01T22:33:11 | 57,332,172 | 11 | 15 | NOASSERTION | 2021-09-16T08:51:49 | 2016-04-28T20:37:11 | Python | UTF-8 | Python | false | false | 4,510 | py |
from oil_library.models import Density
from ec_xl_parse import (get_oil_properties_by_name,
get_oil_properties_by_category)
from ec_oil_props import get_oil_weathering
def get_oil_densities(oil_columns, field_indexes):
    '''
        Getting densities out of this datasheet is more tricky than it should
        be.  There are two categories, density at 15C, and density at 0/5C.
        I dunno, I would have organized the data in a more orthogonal way.

        Returns the Density records for 0C, 5C and 15C concatenated into
        a single list.
    '''
    weathering = get_oil_weathering(oil_columns, field_indexes)
    densities_at_0c = get_oil_densities_at_0c(oil_columns,
                                              field_indexes,
                                              weathering)
    densities_at_5c = get_oil_densities_at_5c(oil_columns,
                                              field_indexes,
                                              weathering)
    densities_at_15c = get_oil_densities_at_15c(oil_columns,
                                                field_indexes,
                                                weathering)
    return densities_at_0c + densities_at_5c + densities_at_15c
def get_oil_densities_at_15c(oil_columns, field_indexes, weathering):
    """Build Density records from the 'density at 15 C' spreadsheet column.

    One record is produced per weathered sample; rows whose density is
    missing or zero are dropped.
    """
    densities = []
    props = get_oil_properties_by_category(oil_columns, field_indexes,
                                           'density_at_15_c_g_ml_astm_d5002')
    prop_names = props.keys()
    for idx, vals in enumerate(zip(*props.values())):
        density_obj = dict(zip(prop_names, [v[0].value for v in vals]))
        # add some properties to the oil that we expect
        density_obj['idx'] = idx
        density_obj['weathering'] = weathering[idx]
        density_obj['ref_temp_k'] = 273.15 + 15.0
        # measurements are g/ml; scale to kg/m^3 below
        density_obj['kg_m_3'] = density_obj['density_15_c_g_ml']
        if density_obj['kg_m_3'] is not None:
            density_obj['kg_m_3'] *= 1000.0
        # prune some properties that we don't want in our object
        del density_obj['density_15_c_g_ml']
        densities.append(density_obj)
    return [Density(**d) for d in densities
            if d['kg_m_3'] not in (None, 0.0)]
def _densities_at_0_5c(oil_columns, field_indexes, weathering,
                       source_key, ref_temp_k):
    """Shared worker for the 0 C and 5 C density readings.

    Both temperatures live in the same spreadsheet category
    ('density_at_0_5_c_g_ml_astm_d5002'); *source_key* selects which of
    the two columns feeds kg_m_3 (converted from g/ml) and *ref_temp_k*
    is the reference temperature recorded on each Density.  Rows whose
    density is missing or zero are dropped.
    """
    densities = []
    props = get_oil_properties_by_category(oil_columns, field_indexes,
                                           'density_at_0_5_c_g_ml_astm_d5002')
    prop_names = props.keys()
    for idx, vals in enumerate(zip(*props.values())):
        density_obj = dict(zip(prop_names, [v[0].value for v in vals]))
        # add some properties to the oil that we expect
        density_obj['idx'] = idx
        density_obj['weathering'] = weathering[idx]
        density_obj['ref_temp_k'] = ref_temp_k
        # measurements are g/ml; scale to kg/m^3 below
        density_obj['kg_m_3'] = density_obj[source_key]
        if density_obj['kg_m_3'] is not None:
            density_obj['kg_m_3'] *= 1000.0
        # prune some properties that we don't want in our object
        del density_obj['density_0_c_g_ml']
        del density_obj['density_5_c_g_ml']
        densities.append(density_obj)
    return [Density(**d) for d in densities
            if d['kg_m_3'] not in (None, 0.0)]


def get_oil_densities_at_0c(oil_columns, field_indexes, weathering):
    """Density records at 0 C (previously a near-duplicate of the 5 C code)."""
    return _densities_at_0_5c(oil_columns, field_indexes, weathering,
                              'density_0_c_g_ml', 273.15)


def get_oil_densities_at_5c(oil_columns, field_indexes, weathering):
    """Density records at 5 C (previously a near-duplicate of the 0 C code)."""
    return _densities_at_0_5c(oil_columns, field_indexes, weathering,
                              'density_5_c_g_ml', 273.15 + 5.0)
def get_oil_api(oil_columns, field_indexes):
    '''
        Get the oil API gravity.

        Returns the non-null values found in the 'api_gravity' /
        'calculated_api_gravity' cells.
    '''
    cells = get_oil_properties_by_name(oil_columns, field_indexes,
                                       'api_gravity', 'calculated_api_gravity')
    return [c[0].value for c in cells if c[0].value is not None]
| [
"james.makela@noaa.gov"
] | james.makela@noaa.gov |
3dd1b171406bc7721b63a12c64a6e50545f3769d | 7c11455f583c73b4c7c57e61a78229231a3798f8 | /reinforcement_learning/models/base_model.py | 1cbd8f29f1bb922c7c1ce17ae3bcc78a6b4b9c25 | [] | no_license | twobackfromtheend/quRL | d75215f24fbe3c4bccfce5f627c20655e0329951 | 8b40017793ca591e8d8cba469bdd71c32b0a1d5a | refs/heads/master | 2020-04-03T17:45:54.975865 | 2019-01-19T12:24:42 | 2019-01-19T12:25:08 | 155,458,154 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import numpy as np
class BaseModel:
    """Abstract interface for models mapping *inputs* features to *outputs* values.

    Subclasses are expected to override every method below that raises
    NotImplementedError.
    """

    def __init__(self, inputs: int, outputs: int):
        # Number of input and output units of the model.
        self.inputs = inputs
        self.outputs = outputs

    def build_model(self):
        """Construct the underlying model; must be overridden."""
        raise NotImplementedError

    def save_model(self, filename: str):
        """Persist the model to *filename*; must be overridden."""
        raise NotImplementedError

    def create_copy(self):
        """Return a new instance constructed from this instance's attributes.

        NOTE(review): passes the whole __dict__ as keyword arguments, so
        this only works while the instance's attributes exactly match the
        subclass constructor's parameters -- verify for stateful subclasses.
        """
        return self.__class__(**self.__dict__)

    def set_learning_rate(self, learning_rate: float):
        """Update the learning rate; must be overridden."""
        raise NotImplementedError

    def predict(self, x: np.ndarray) -> np.ndarray:
        """Return the model's output for input batch *x*; must be overridden."""
        raise NotImplementedError

    def train_on_batch(self, x: np.ndarray, y: np.ndarray):
        """Run one training step on the batch (*x*, *y*); must be overridden."""
        raise NotImplementedError
| [
"harry1996@gmail.com"
] | harry1996@gmail.com |
62bb90eaf5656edc3f0d26b8615d22a301750586 | 528976ba81dfab381e2273d9784c7e21d5c90c34 | /snake/main.py | d3523a15e019c4c8e174d2170158039e1a98978f | [
"MIT"
] | permissive | uaiuaief/Machine-Learning-for-Snake-Game | d65478d08b8d2bf4c06e0668dfdb41b1b6d0514c | 1a36ef4468eea9f80f44fb1b8d115790262320c5 | refs/heads/master | 2020-09-14T20:24:55.545625 | 2019-11-28T21:15:46 | 2019-11-28T21:15:46 | 223,244,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | import random
from tkinter import mainloop, CENTER, NW
class SnakeAPP:
    """Application entry point and central configuration for the Snake game.

    The ``config`` dict bundles every tunable constant read by the rest
    of the package: colors, grid geometry, button layout for the menu and
    AI screens, score display, genetic-algorithm parameters and plot
    options.
    """

    # NOTE(review): MUTATION_RATE is drawn with random.randint once at
    # import time, so each process run uses a different mutation rate.
    config = {
        "TICK": 100,
        "SNAKE_BODY_COLOR": 'green',
        "SNAKE_HEAD_COLOR": 'lime green',
        "BACKGROUND_COLOR": 'black',
        "FOOD_COLOR": 'red',
        "GRID_WIDTH_IN_SQMS": 20,
        "GRID_HEIGHT_IN_SQMS": 20,
        "GRID_SQM_SIZE": 30,
        "SCREEN_TITLE": 'Snake Game',
        # Starting Screen:
        "BUTTON_TEXT_FONT": 'verdana',
        "BUTTON_TEXT_ANCHOR": 'nw',
        "PLAY_BUTTON_TEXT_COORDINATES": (77, 215),
        # "PLAY_BUTTON_TEXT_COORDINATES": (100, 200),
        "PLAY_BUTTON_TEXT": "Normal Mode",
        "PLAY_BUTTON_TEXT_COLOR": "gray60",
        "PLAY_BUTTON_TEXT_SIZE": 50,
        "PLAY_BUTTON_ACTIVE_FILL": 'white',
        "AI_BUTTON_TEXT_COLOR": "gray60",
        "AI_BUTTON_TEXT_SIZE": 50,
        "AI_BUTTON_TEXT_COORDINATES": (158, 315),
        "AI_BUTTON_TEXT": 'AI Mode',
        "AI_BUTTON_ACTIVE_FILL": 'white',
        "SEE_FITTEST_BUTTON_TEXT_COLOR": "SkyBlue4",
        "SEE_FITTEST_BUTTON_TEXT_SIZE": 12,
        "SEE_FITTEST_BUTTON_TEXT_COORDINATES": (472, 10),
        "SEE_FITTEST_BUTTON_TEXT": 'Watch Best AI',
        "SEE_FITTEST_BUTTON_ACTIVE_FILL": 'SkyBlue2',
        "PLOT_DISPLAY_BUTTON_TEXT_COLOR": "SkyBlue4",
        "PLOT_DISPLAY_BUTTON_TEXT_SIZE": 12,
        "PLOT_DISPLAY_BUTTON_TEXT_COORDINATES": (388, 10),
        "PLOT_DISPLAY_BUTTON_TEXT": 'Charts |',
        "PLOT_DISPLAY_BUTTON_ACTIVE_FILL": 'SkyBlue2',
        # AI Screen:
        "MENU_BUTTON_TEXT_FONT": 'verdana',
        "MENU_BUTTON_TEXT_COORDINATES": (530, 10),
        "MENU_BUTTON_TEXT": "Menu",
        "MENU_BUTTON_TEXT_COLOR": "SkyBlue4",
        "MENU_BUTTON_TEXT_SIZE": 18,
        "MENU_BUTTON_ACTIVE_FILL": 'SkyBlue2',
        "TICK_BUTTON_TEXT_FONT": 'arial',
        "TICK_BUTTON_TEXT_COORDINATES": (150, 10),
        "TICK_BUTTON_TEXT": "Tick",
        "TICK_BUTTON_TEXT_COLOR": "gray80",
        "TICK_BUTTON_TEXT_SIZE": 8,
        "GRAPHICS_BUTTON_TEXT_FONT": 'arial',
        "GRAPHICS_BUTTON_TEXT_COORDINATES": (200, 10),
        "GRAPHICS_BUTTON_TEXT": "Graphics",
        "GRAPHICS_BUTTON_TEXT_COLOR": "gray80",
        "GRAPHICS_BUTTON_TEXT_SIZE": 8,
        # Score:
        # HIGH SCORE
        "HIGH_SCORE_TEXT_FONT": 'arial',
        "HIGH_SCORE_TEXT_ANCHOR": NW,
        "HIGH_SCORE_LABEL_TEXT_COLOR": "gray80",
        "HIGH_SCORE_LABEL_TEXT_SIZE": 10,
        "HIGH_SCORE_LABEL_TEXT_COORDINATES": (10, 10),
        "HIGH_SCORE_LABEL_TEXT": None,
        "HIGH_SCORE_LABEL_ACTIVE_FILL": None,
        # CURRENT SCORE
        "CURRENT_SCORE_TEXT_FONT": 'arial',
        "CURRENT_SCORE_TEXT_ANCHOR": CENTER,
        "CURRENT_SCORE_LABEL_TEXT_COLOR": "gray3",
        "CURRENT_SCORE_LABEL_TEXT_SIZE": 300,
        "CURRENT_SCORE_LABEL_TEXT_COORDINATES": None,
        "CURRENT_SCORE_LABEL_TEXT": None,
        "CURRENT_SCORE_LABEL_ACTIVE_FILL": None,
        # AI Configurations:
        "WATCHING_TICK": 0.03,
        "AI_TICK": 0.0,
        "MUTATION_RATE": random.randint(20, 500)/100,
        "MUTATION_THRESHOLD": 48,
        "LIFE_SPAN": 45,
        "POPULATION_SIZE": 120,
        "APPLE_AMOUNT_TO_INCREASE": 45,
        # PLOT Configurations:
        'PLOT_X_AXIS_AMOUNT': 70,
        "PLOT_SIZE": (6.04, 5),
    }

    @staticmethod
    def run():
        """Create the View, show the start screen and enter the Tk mainloop."""
        # Imported here, not at module level -- presumably to avoid a
        # circular import with snake.view; TODO confirm.
        from snake.view import View
        view = View()
        view.goto_starting_screen()
        mainloop()
| [
"="
] | = |
380ab18c6dccc5590c8334095f89d932b866f7ae | b6721322ada8cc2820df67c5d28c568edb28cde9 | /DectoRoman_2019_2020.py | ab3070bf36643dc4704e30c6110caf9d4845908c | [] | no_license | Ticonderoga/CoursInfoL2 | 0d68914b2cc94e8df42b296524a16e4b88d6b44d | 8d65b2f269ca1bd0e923082f9506194600969f0c | refs/heads/master | 2021-04-11T03:38:09.547703 | 2020-05-15T14:32:10 | 2020-05-15T14:32:10 | 248,989,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 17:04:01 2020
@author: phil
"""
def DectoRoman(deci):
    """Convert a decimal integer to its Roman-numeral representation.

    Parameters
    ----------
    deci : int
        Number to convert (values <= 0 yield an empty string, as before).

    Returns
    -------
    str
        The Roman numerals corresponding to ``deci``.
    """
    L_Dec = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    L_Rom = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
    rom = ''
    # Greedy algorithm: for each symbol, emit it as long as it still fits.
    # Replaces the original flag-driven loop that rescanned the whole table
    # once per emitted symbol; the output is identical.
    for d, r in zip(L_Dec, L_Rom):
        while deci >= d:
            deci -= d
            rom += r
    return rom
if __name__ == "__main__" :
    # Quick manual check: 1924 -> MCMXXIV.
    nombre = 1924
    nombre_Romain = DectoRoman(nombre)
    print(nombre_Romain)
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
65cad5d0de4d5aa9756f02fbc08038d47c0d8101 | 381dc64b93f0bd8cb25553f2415a4add64eb1d39 | /arrange_good_one.py | 8d0b0d53c4648f03dacb07dabf4d00c856a60821 | [] | no_license | EmuxEvans/py_learn | ce1abd29e5ba8533b4208101ad52aebd057c2038 | 69476ec5b482eb4b4ddce2aff4ed6e934e08b7f1 | refs/heads/master | 2021-01-21T02:55:59.716050 | 2015-03-19T13:17:29 | 2015-03-19T13:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | import copy
def arrange(n):
    """Return every permutation of [0, 1, ..., n-1] as a list of lists.

    Permutations are grown by inserting each new element at every
    possible position of the previously built permutations, preserving
    the original output order.  Returns [] for n <= 0 -- the original
    returned None there, which crashed callers (e.g. arrangestr('')) that
    iterate the result.
    """
    if n <= 0:
        return []
    result = [[]]
    for i in range(n):
        grown = []
        for perm in result:
            # Insert i at each of the i + 1 possible positions.
            for j in range(i + 1):
                grown.append(perm[:j] + [i] + perm[j:])
        # Fresh lists are built above, so no copy.deepcopy is needed.
        result = grown
    return result
def arrangestr(s):
    # Print every permutation of the characters of s (one per line),
    # followed by the total number of permutations.
    # Python 2 print statements -- this file predates Python 3.
    n = len(s)
    list_n = arrange(n)
    list_s = s[:]
    temp = []  # unused first binding; re-initialised inside the loop below
    for L in list_n:
        temp = []
        for j in range(n):
            temp.append(list_s[L[j]])
        print "".join(temp)
    print len(list_n)
arrangestr('abcdefg')
| [
"metathinkerk@gmail.com"
] | metathinkerk@gmail.com |
b1fb8191d74cba456913da37765a7243bad750a6 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/beginner/038_using_elementtree_to_parse_xml/save4_nopass.py | 610acd10a85062c21c2e9a451793e4d85cb06c73 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,493 | py | import xml.etree.ElementTree as ET
# from OMDB
xmlstring = '''<?xml version="1.0" encoding="UTF-8"?>
<root response="True">
<movie title="The Prestige" year="2006" rated="PG-13" released="20 Oct 2006" runtime="130 min" genre="Drama, Mystery, Sci-Fi" director="Christopher Nolan" />
<movie title="The Dark Knight" year="2008" rated="PG-13" released="18 Jul 2008" runtime="152 min" genre="Action, Crime, Drama" director="Christopher Nolan" />
<movie title="The Dark Knight Rises" year="2012" rated="PG-13" released="20 Jul 2012" runtime="164 min" genre="Action, Thriller" director="Christopher Nolan" />
<movie title="Dunkirk" year="2017" rated="PG-13" released="21 Jul 2017" runtime="106 min" genre="Action, Drama, History" director="Christopher Nolan" />
<movie title="Interstellar" year="2014" rated="PG-13" released="07 Nov 2014" runtime="169 min" genre="Adventure, Drama, Sci-Fi" director="Christopher Nolan"/>
</root>''' # noqa E501
def get_tree():
"""You probably want to use ET.fromstring"""
return ET.fromstring(xmlstring)
def get_movies():
"""Call get_tree and retrieve all movie titles, return a list or generator"""
movie_list = []
for movie_title in get_tree():
movie_list.append(movie_title.attrib["title"])
return movie_list
def get_movie_longest_runtime():
"""Call get_tree again and return the movie title for the movie with the longest
runtime in minutes, for latter consider adding a _get_runtime helper"""
pass | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
e11f5b8a6bce94fb2fb1eed0c75f6808a52e33b9 | 1aa6e732645f4603c05a1c9262f6fbb1af76b056 | /patchinfo/Google_Apps/gapps-task650.py | 0d9fc26ced139d01b3e77087393f74a2da5240a8 | [] | no_license | nauddin257/DualBootPatcher | f2831bdc72d8f94787a1d3ad94d0d85103316dd5 | 024af7ecb38ba6b4e3f1ae16ab81e32cd213864f | refs/heads/master | 2020-12-11T07:26:46.916515 | 2013-11-14T03:54:41 | 2013-11-14T03:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from fileinfo import FileInfo
import re
file_info = FileInfo()
filename_regex = r"^gapps-jb\([0-9\.]+\)-[0-9\.]+\.zip$"
file_info.patch = 'Google_Apps/gapps-task650.dualboot.patch'
file_info.has_boot_image = False
def matches(filename):
    """Return True when *filename* matches the Task650 gapps-jb zip pattern.

    ``re.search`` yields a match object or ``None``; collapsing it with
    ``bool`` replaces the redundant ``if/else`` returning literal
    True/False while keeping the same return values.
    """
    return bool(re.search(filename_regex, filename))
def print_message():
    """Announce on stdout that this patcher recognised the zip."""
    print("Detected Task650's Google Apps zip")
def get_file_info():
    """Expose the module-level ``FileInfo`` instance to the patch driver."""
    return file_info
| [
"chenxiaolong@cxl.epac.to"
] | chenxiaolong@cxl.epac.to |
5e6661b7a1ff8065f2e5e82f2a5ce4f5f69c88b9 | 8b25a7984bd18fc356232083da0bb2f829a1dbd4 | /ineco_point_reward/__init__.py | 446998d61d4fb32feff1de8c38b9b0e55af31f0f | [] | no_license | anndream/new_mixprint_addons | f94067a1248cf3d30ce4e937d5fb3c96bc9cb482 | 1b4b04388e723dc7137dd8d2a29fdef3f59f4861 | refs/heads/master | 2020-04-09T19:17:36.882746 | 2015-09-10T04:41:13 | 2015-09-10T04:41:13 | 42,242,457 | 0 | 2 | null | 2015-09-10T12:13:56 | 2015-09-10T12:13:56 | null | UTF-8 | Python | false | false | 1,106 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import point
import sale
import res_partner
import product
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"thitithup@gmail.com"
] | thitithup@gmail.com |
e9025879b824e2a02721409d1fe7e3c7fa642b83 | 147519505f3c47e5f10d9679e07d3719931b9fd0 | /ecommerce/product/views.py | d03412aa093af7ddfd44127d9b19f824f955fc21 | [] | no_license | grbalmeida/hello-django | 85ed28d8d47a9a2e072f3eecd13d22fb2e977a31 | 9ef261ba5faeac3de8d36eeb7efa8974e5d1e661 | refs/heads/master | 2020-08-12T10:10:48.554349 | 2019-12-20T01:18:33 | 2019-12-20T01:18:33 | 214,748,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,585 | py | from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.views import View
from django.http import HttpResponse
from django.contrib import messages
from . import models
from user_profile.models import UserProfile, Address
class ProductList(ListView):
    """Paginated listing of every product (10 per page)."""

    model = models.Product
    context_object_name = 'products'
    template_name = 'product/list.html'
    paginate_by = 10
class ProductDetails(DetailView):
    """Detail page for a single product, resolved by its slug."""

    model = models.Product
    context_object_name = 'product'
    template_name = 'product/details.html'
    slug_url_kwarg = 'slug'
class AddToCart(View):
    """Add one unit of a product variation (``?vid=<id>``) to the session cart.

    The cart lives in ``request.session['cart']`` as a dict keyed by
    variation id; each entry carries denormalized product/variation data
    so templates can render the cart without extra queries.
    """

    def get(self, *args, **kwargs):
        # Always bounce back to the page the user came from, falling back
        # to the product list when no referer header is present.
        http_referer = self.request.META.get(
            'HTTP_REFERER',
            reverse('product:list')
        )
        variation_id = self.request.GET.get('vid')
        # No ?vid= parameter: nothing to add.
        if not variation_id:
            messages.error(
                self.request,
                'Product not found'
            )
            return redirect(http_referer)
        variation = get_object_or_404(models.Variation, id=variation_id)
        product = variation.product
        # Snapshot the fields that get denormalized into the cart entry.
        product_id = product.id
        product_name = product.name
        variation_name = variation.name or ''
        variation_stock = variation.stock
        variation_price = variation.price
        variation_promotional_price = variation.promotional_price
        amount = 1
        slug = product.slug
        image = product.image
        # Store only the file name; empty string when no image is set.
        image = image.name if image else ''
        # Out of stock: refuse to add anything.
        if variation.stock < 1:
            messages.error(
                self.request,
                'Insufficient stock'
            )
            return redirect(http_referer)
        # Lazily create the session cart on first use.
        if not self.request.session.get('cart'):
            self.request.session['cart'] = {}
            self.request.session.save()
        cart = self.request.session.get('cart')
        if variation_id in cart:
            # Variation already in cart: bump the amount, clamped to stock.
            current_amount = cart[variation_id]['amount']
            current_amount += 1
            if variation_stock < current_amount:
                messages.warning(
                    self.request,
                    f'Insufficient stock for {current_amount}x in {product_name} product. '
                    f'We add {variation_stock}x to your cart.'
                )
                current_amount = variation_stock
            cart[variation_id]['amount'] = current_amount
            # Keep the per-line totals in sync with the new amount.
            cart[variation_id]['quantitative_price'] = variation_price * current_amount
            cart[variation_id]['promotional_quantitative_price'] = \
                variation_promotional_price * current_amount
        else:
            # First unit of this variation: create a fresh cart entry.
            cart[variation_id] = {
                'product_id': product_id,
                'product_name': product_name,
                'variation_id': variation_id,
                'variation_name': variation_name,
                'variation_price': variation_price,
                'variation_promotional_price': variation_promotional_price,
                'quantitative_price': variation_price,
                'promotional_quantitative_price': variation_promotional_price,
                'amount': amount,
                'slug': slug,
                'image': image
            }
        # The cart dict was mutated in place, so the session must be saved
        # explicitly for the change to persist.
        self.request.session.save()
        messages.success(
            self.request,
            f'{product_name} {variation_name} product added to your cart'
        )
        return redirect(http_referer)
class RemoveFromCart(View):
    """Remove an entire cart line for the variation given by ``?vid=<id>``."""

    def get(self, *args, **kwargs):
        # Return to the referring page; fall back to the product list.
        http_referer = self.request.META.get(
            'HTTP_REFERER',
            reverse('product:list')
        )
        variation_id = self.request.GET.get('vid')
        # Silently bounce when there is nothing sensible to remove:
        # missing parameter, no cart yet, or variation not in the cart.
        if not variation_id:
            return redirect(http_referer)
        cart = self.request.session.get('cart')
        if not cart:
            return redirect(http_referer)
        if variation_id not in cart:
            return redirect(http_referer)
        # Build the message before deleting the entry it reads from.
        messages.success(
            self.request,
            f'Product {cart[variation_id]["product_name"]} '
            f'{cart[variation_id]["variation_name"]} removed from your cart'
        )
        del cart[variation_id]
        # In-place mutation of the session dict requires an explicit save.
        self.request.session.save()
        return redirect(http_referer)
class Cart(View):
    """Render the current session cart."""

    def get(self, *args, **kwargs):
        cart = self.request.session.get('cart')
        return render(self.request, 'product/cart.html', {'cart': cart})
class PurchaseSummary(View):
    """Show the purchase summary for an authenticated user with a profile.

    Redirects to profile creation when unauthenticated or profile-less,
    and to the product list when the session cart is empty.
    """

    def get(self, *args, **kwargs):
        if not self.request.user.is_authenticated:
            return redirect('user_profile:create')

        # Bug fix: the existence check must run BEFORE .get(). The
        # original called UserProfile.objects.get() first, which raises
        # DoesNotExist for users without a profile, making the
        # "User without profile" redirect branch unreachable.
        user_profile_exists = UserProfile.objects.filter(user=self.request.user).exists()
        if not user_profile_exists:
            messages.error(
                self.request,
                'User without profile'
            )
            return redirect('user_profile:create')

        user_profile = UserProfile.objects.get(user=self.request.user)
        address = Address.objects.get(user_profile=user_profile)

        if not self.request.session.get('cart'):
            messages.error(
                self.request,
                'Empty cart'
            )
            return redirect('product:list')

        context = {
            'user': self.request.user,
            'cart': self.request.session['cart'],
            'address': address,
            'user_profile': user_profile
        }
        return render(self.request, 'product/purchase-summary.html', context)
| [
"g.r.almeida@live.com"
] | g.r.almeida@live.com |
11115846bcf63ce3f18a74a99918a5763ab07da3 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/enums/net_ip_config_info_ip_address_status.py | 72780ca843b9d782573c01057d893237fb90437c | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 323 | py |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
NetIpConfigInfoIpAddressStatus = Enum(
'deprecated',
'duplicate',
'inaccessible',
'invalid',
'preferred',
'tentative',
'unknown',
)
| [
"jmb@pexip.com"
] | jmb@pexip.com |
69b8440a1cf5de7d0991dc2472928b20cde2bdb3 | 43cdd7cb26fe44b1ed7de6a46f8b5e680c9b1372 | /tests/components/mqtt/test_subscription.py | 5ee784b8ab76967c31edb266f4a6e916a62b2fc7 | [
"Apache-2.0"
] | permissive | OpenPeerPower/Open-Peer-Power | 02ec5c133564b47c6f72f669e844a666643cacd6 | 940a04a88e8f78e2d010dc912ad6905ae363503c | refs/heads/master | 2022-08-16T09:38:49.994009 | 2021-05-29T03:54:13 | 2021-05-29T03:54:13 | 183,174,237 | 1 | 0 | Apache-2.0 | 2022-07-15T18:43:02 | 2019-04-24T07:35:47 | Python | UTF-8 | Python | false | false | 5,209 | py | """The tests for the MQTT subscription component."""
from unittest import mock
from openpeerpower.components.mqtt.subscription import (
async_subscribe_topics,
async_unsubscribe_topics,
)
from openpeerpower.core import callback
from tests.common import async_fire_mqtt_message, async_mock_mqtt_component
async def test_subscribe_topics(opp, mqtt_mock, caplog):
    """Test subscription to topics.

    Verifies that each msg_callback fires only for its own topic and
    that no callback fires after async_unsubscribe_topics.
    """
    # Recorders collect the message objects each subscription receives.
    calls1 = []
    @callback
    def record_calls1(*args):
        """Record calls."""
        calls1.append(args)
    calls2 = []
    @callback
    def record_calls2(*args):
        """Record calls."""
        calls2.append(args)
    sub_state = None
    sub_state = await async_subscribe_topics(
        opp,
        sub_state,
        {
            "test_topic1": {"topic": "test-topic1", "msg_callback": record_calls1},
            "test_topic2": {"topic": "test-topic2", "msg_callback": record_calls2},
        },
    )
    # A message on topic1 reaches only record_calls1, with topic/payload intact.
    async_fire_mqtt_message(opp, "test-topic1", "test-payload1")
    assert len(calls1) == 1
    assert calls1[0][0].topic == "test-topic1"
    assert calls1[0][0].payload == "test-payload1"
    assert len(calls2) == 0
    # And a message on topic2 reaches only record_calls2.
    async_fire_mqtt_message(opp, "test-topic2", "test-payload2")
    assert len(calls1) == 1
    assert len(calls2) == 1
    assert calls2[0][0].topic == "test-topic2"
    assert calls2[0][0].payload == "test-payload2"
    # After unsubscribing, further messages must not be delivered.
    await async_unsubscribe_topics(opp, sub_state)
    async_fire_mqtt_message(opp, "test-topic1", "test-payload")
    async_fire_mqtt_message(opp, "test-topic2", "test-payload")
    assert len(calls1) == 1
    assert len(calls2) == 1
async def test_modify_topics(opp, mqtt_mock, caplog):
    """Test modification of topics.

    Re-subscribing with a changed topic map should move test_topic1 to a
    new MQTT topic and drop test_topic2 entirely.
    """
    calls1 = []
    @callback
    def record_calls1(*args):
        """Record calls."""
        calls1.append(args)
    calls2 = []
    @callback
    def record_calls2(*args):
        """Record calls."""
        calls2.append(args)
    sub_state = None
    sub_state = await async_subscribe_topics(
        opp,
        sub_state,
        {
            "test_topic1": {"topic": "test-topic1", "msg_callback": record_calls1},
            "test_topic2": {"topic": "test-topic2", "msg_callback": record_calls2},
        },
    )
    # Sanity check: both initial subscriptions deliver messages.
    async_fire_mqtt_message(opp, "test-topic1", "test-payload")
    assert len(calls1) == 1
    assert len(calls2) == 0
    async_fire_mqtt_message(opp, "test-topic2", "test-payload")
    assert len(calls1) == 1
    assert len(calls2) == 1
    # Modify: test_topic1 now listens on "test-topic1_1"; test_topic2 is
    # absent from the map, so its subscription should be removed.
    sub_state = await async_subscribe_topics(
        opp,
        sub_state,
        {"test_topic1": {"topic": "test-topic1_1", "msg_callback": record_calls1}},
    )
    # The old topics must no longer be delivered.
    async_fire_mqtt_message(opp, "test-topic1", "test-payload")
    async_fire_mqtt_message(opp, "test-topic2", "test-payload")
    assert len(calls1) == 1
    assert len(calls2) == 1
    # The new topic reaches the surviving callback.
    async_fire_mqtt_message(opp, "test-topic1_1", "test-payload")
    assert len(calls1) == 2
    assert calls1[1][0].topic == "test-topic1_1"
    assert calls1[1][0].payload == "test-payload"
    assert len(calls2) == 1
    # After unsubscribing everything, nothing is delivered anymore.
    await async_unsubscribe_topics(opp, sub_state)
    async_fire_mqtt_message(opp, "test-topic1_1", "test-payload")
    async_fire_mqtt_message(opp, "test-topic2", "test-payload")
    assert len(calls1) == 2
    assert len(calls2) == 1
async def test_qos_encoding_default(opp, mqtt_mock, caplog):
    """A subscription without explicit qos/encoding uses qos 0 and utf-8."""
    broker = await async_mock_mqtt_component(opp)

    @callback
    def on_message(*args):
        """Discard every message."""

    await async_subscribe_topics(
        opp,
        None,
        {"test_topic1": {"topic": "test-topic1", "msg_callback": on_message}},
    )

    broker.async_subscribe.assert_called_once_with(
        "test-topic1", mock.ANY, 0, "utf-8"
    )
async def test_qos_encoding_custom(opp, mqtt_mock, caplog):
    """Explicit qos/encoding in the topic map are forwarded verbatim."""
    broker = await async_mock_mqtt_component(opp)

    @callback
    def on_message(*args):
        """Discard every message."""

    await async_subscribe_topics(
        opp,
        None,
        {
            "test_topic1": {
                "topic": "test-topic1",
                "msg_callback": on_message,
                "qos": 1,
                "encoding": "utf-16",
            }
        },
    )

    broker.async_subscribe.assert_called_once_with(
        "test-topic1", mock.ANY, 1, "utf-16"
    )
async def test_no_change(opp, mqtt_mock, caplog):
    """Re-subscribing with an unchanged topic map must not subscribe again."""
    broker = await async_mock_mqtt_component(opp)

    @callback
    def on_message(*args):
        """Discard every message."""

    state = await async_subscribe_topics(
        opp,
        None,
        {"test_topic1": {"topic": "test-topic1", "msg_callback": on_message}},
    )
    subscribes_before = broker.async_subscribe.call_count

    state = await async_subscribe_topics(
        opp,
        state,
        {"test_topic1": {"topic": "test-topic1", "msg_callback": on_message}},
    )
    assert broker.async_subscribe.call_count == subscribes_before
| [
"pcaston@arach.net.au"
] | pcaston@arach.net.au |
251e27541c26ade09017922536afaa65c6e7a613 | a816de2c05290e9a1dcfe1e3e50b96e36792898a | /TESSLCclass.py | 30552e94e69fbe6cad0cd43162dfb7f19ae0be04 | [] | no_license | r-cloutier/mdwarfparams | 2998defb6ed62b6ec8fe1d6a868a9541ea350c15 | e62f10473f91405e2f9cf4998e6ba85a5fd73243 | refs/heads/master | 2021-06-17T14:27:25.835864 | 2019-08-26T18:04:08 | 2019-08-26T18:04:08 | 145,714,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | from imports import *
from truncate_cmap import *
def loadpickle(fname):
    """Unpickle and return the object stored in the file *fname*.

    Uses a context manager so the file handle is closed even when
    ``pickle.load`` raises; the original explicit ``close()`` leaked the
    handle on error.
    """
    with open(fname, 'rb') as fObj:
        return pickle.load(fObj)
class TESSLC:
    """Container for a TESS light curve, pickled under ``PipelineResults/``.

    Instantiating it creates ``PipelineResults/TIC_<TICid>/`` (if needed)
    and immediately pickles the object to ``TESSLC_<index>`` inside it.
    """

    def __init__(self, TICid, index):
        # Pre-exist_ok idiom: ignore "directory already exists".
        # NOTE(review): this also swallows other OSErrors (e.g. permission
        # denied); kept as-is for behavioural compatibility.
        try:
            os.mkdir('PipelineResults')
        except OSError:
            pass
        self.TICid = TICid
        self.folder_full = 'PipelineResults/TIC_%i'%self.TICid
        # %.5d zero-pads the index to five digits (e.g. TESSLC_00007).
        self.fname_full = '%s/TESSLC_%.5d'%(self.folder_full, index)
        try:
            os.mkdir(self.folder_full)
        except OSError:
            pass
        self._pickleobject()

    def _pickleobject(self):
        """Serialize this object to ``self.fname_full``.

        The context manager guarantees the handle is closed even if
        ``pickle.dump`` raises; the original explicit ``close()`` leaked
        it on error.
        """
        with open(self.fname_full, 'wb') as fObj:
            pickle.dump(self, fObj)
| [
"cloutier@astro.utoronto.ca"
] | cloutier@astro.utoronto.ca |
4a0119a6b16b4ec5de4f7084e981c22e29875eae | b049ec2f36bb63537ca5b73717635f2dc0126cda | /399_Evaluate_Division/399_2.py | 4a28c1a4a9e5a35f0b88010918c5a7d6d2df7b9b | [] | no_license | massquantity/LeetCode | 01d29fe8922b7545140015efbda0f71b04043124 | e298cdab86a4de81bf5a44579c54b5bc7bcb1618 | refs/heads/master | 2021-07-20T07:18:38.109707 | 2021-07-03T04:11:15 | 2021-07-03T04:11:15 | 135,297,184 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
graph = collections.defaultdict(set)
weights = dict()
visited = set()
def bfs(start, end):
if (start, end) in weights:
return weights[(start, end)]
if start not in graph or end not in graph:
return -1.0
if start == end:
return 1.0
visited.add(start)
queue = collections.deque()
for n in graph[start]:
queue.append((n, weights[(start, n)]))
while queue:
n, w = queue.popleft()
if n == end:
return w
for neig in graph[n]:
if neig not in visited:
visited.add(neig)
weights[(start, neig)] = w * weights[(n, neig)]
queue.append((neig, weights[(start, neig)]))
return -1.0
for g, v in zip(equations, values):
graph[g[0]].add(g[1])
graph[g[1]].add(g[0])
weights[(g[0], g[1])] = v
weights[(g[1], g[0])] = 1.0 / v
res = list()
for q in queries:
visited.clear()
res.append(bfs(q[0], q[1]))
return res
| [
"wdmjjxg@163.com"
] | wdmjjxg@163.com |
afeaee0d2e5f5995448744d53b52869fc13fa776 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/perm_20200622013715.py | 9bb5e958673f171c3512fadc89c47141dea344c0 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | def sequence(n,k):
newArr = []
for i in range(1,n+1):
newArr.append(i)
# index == k/n-1!
answer = " "
i = 1
factor = 1
while i <= n-1:
factor *=i
i +=1
print(factor)
index = k/factor
sequence(3,3) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
06052e5198f1bd847230dc07432e93da469d8b0e | 3cca537e780ba900087b187d1494713f0c81a24d | /lenstools/tests/test_limber.py | 00cf52cad33fc6b031b5476bb4e990699ccf8f7a | [
"MIT"
] | permissive | apetri/LensTools | 4119c1b5c0570fb6e4078fa67fb3acd5b443c0a5 | 9151988bfe6fbd6809353a33cfb556d44b6806ed | refs/heads/master | 2023-07-19T02:03:26.708366 | 2021-01-21T14:26:47 | 2021-01-21T14:26:47 | 27,881,137 | 32 | 33 | null | 2023-07-12T10:53:00 | 2014-12-11T16:43:33 | Python | UTF-8 | Python | false | false | 680 | py | import os
from ..simulations.limber import LimberIntegrator
from ..utils.defaults import load_power_default
from .. import dataExtern
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import WMAP9
def test_convergence_power():
	"""Integrate the CAMB matter power spectrum with Limber and plot C_l."""
	# Multipoles l from 1 to 1e5, log-spaced.
	l = np.logspace(0.0,5.0,100)
	integrator = LimberIntegrator(cosmoModel=WMAP9)
	integrator.load3DPowerSpectrum(load_power_default,os.path.join(dataExtern(),"camb_output"),"fiducial_matterpower_")
	Cl = integrator.convergencePowerSpectrum(l)
	# Plot the dimensionless spectrum l(l+1)C_l/2pi on log-log axes.
	plt.plot(l,l*(l+1)*Cl/(2.0*np.pi))
	plt.xscale("log")
	plt.yscale("log")
	plt.xlabel("l")
	plt.ylabel("l(l+1)C_l/2*pi")
	# NOTE(review): bare except silently skips the figure save (e.g. on a
	# headless backend); consider narrowing the exception type.
	try:
		plt.savefig("limber_power.png")
	except:
pass | [
"apetri@phys.columbia.edu"
] | apetri@phys.columbia.edu |
bcc5aaa88ec03798f57cbf2092d41e69f6d8be0c | e4806fe953cbb76a6baf1f27ae40562561014f36 | /labravel/MIS_SYS/MAIN.py | ab1d0b37968eeddf9ae34f447d78387237cb0f3c | [] | no_license | LaBravel/Tedu-code | 701daa5f49ab42129db0a4684c8e7b3cbcbe1d65 | 9c93e9d88e940e627c7a3d1e8c2519035b462086 | refs/heads/master | 2020-04-27T16:33:55.535261 | 2019-03-08T07:27:29 | 2019-03-08T07:27:29 | 174,486,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | from MENU import *
from STUDENT import *
def main():
    """Interactive menu loop for the student management system.

    Dispatches on the user's choice to the student CRUD/sort/load/save
    helpers imported star-wise from STUDENT; 'q'/'Q' exits the loop.
    The prompt and error strings are user-facing Chinese text.
    """
    DATE = []  # in-memory list of student records shared by all operations
    while 1 :
        show_menu()
        # Prompt: "which operation do you want to perform?"
        choice = input('要进行什么操作?')
        if choice == '1' :
            input_student(DATE)          # add records
        elif choice == '2' :
            output_student(DATE)         # list records
        elif choice == '3' :
            delete_student(DATE)         # delete a record
        elif choice == '4' :
            edit_student(DATE)           # edit a record
        elif choice == '5' :
            sorted_scores_student(DATE,True)   # sort by score (flag True)
        elif choice == '6' :
            sorted_scores_student(DATE,False)  # sort by score (flag False)
        elif choice == '7' :
            sorted_ages_student(DATE,True)     # sort by age (flag True)
        elif choice == '8' :
            sorted_ages_student(DATE,False)    # sort by age (flag False)
        elif choice == '9' :
            load_info_student(DATE)      # load records from storage
        elif choice == '10' :
            save_info_student(DATE)      # save records to storage
        elif choice == 'q' or choice == 'Q' :
            break
        else :
            # "no such operation"
            print('没有这项操作!')
main()
| [
"463662798@qq.com"
] | 463662798@qq.com |
2967eddce5d6006864ec854b7c7f9d7c3e829f9e | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/flowprofile.py | ea7288f4d656a7167aabf2db92581bc9dd90f409 | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 4,008 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
# NOTE: generated REST/SDM wrapper code — prefer regenerating over hand edits.
class FlowProfile(Base):
    """Flow Range Profile
    The FlowProfile class encapsulates a required flowProfile resource which will be retrieved from the server every time the property is accessed.
    """
    __slots__ = ()
    # Server-side data-model name of this resource.
    _SDM_NAME = 'flowProfile'
    def __init__(self, parent):
        super(FlowProfile, self).__init__(parent)
    @property
    def MatchAction(self):
        """An instance of the MatchAction class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.matchaction.MatchAction)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Local import avoids a circular dependency between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.matchaction import MatchAction
        return MatchAction(self)
    @property
    def Count(self):
        """Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        Returns:
            number
        """
        return self._get_attribute('count')
    @property
    def DescriptiveName(self):
        """Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
        Returns:
            str
        """
        return self._get_attribute('descriptiveName')
    @property
    def Name(self):
        """Name of NGPF element, guaranteed to be unique in Scenario
        Returns:
            str
        """
        return self._get_attribute('name')
    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)
    def update(self, Name=None):
        """Updates a child instance of flowProfile on the server.
        Args:
            Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # Base._update receives locals() — presumably it filters out 'self'
        # and sends the remaining keyword values; confirm in Base.
        self._update(locals())
    def AddFromTemplate(self, *args, **kwargs):
        """Executes the addFromTemplate operation on the server.
        Creates a Match Action prototype supported by the template.
        addFromTemplate(Arg2:href)
        Args:
            args[0] is Arg2 (str(None|/api/v1/sessions/1/ixnetwork/?deepchild=*)):
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Arg1 is always this resource's href; positional args map to Arg2..N.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('addFromTemplate', payload=payload, response_object=None)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
2d04d274781ae60c5385e020cc01d8fc1c3b99bc | 9f7c9201b86128d2459e463d3bb1c60b7e434a78 | /examples/tree_benchmark.py | ea39ec0d7c7b9965ca1cf840687150bca077ec9e | [] | no_license | jackd/deep-cloud | b4a171a290c22a113b8a6dd3a49c875afae84b93 | 9adb25bfcdfd1f2faf3820378cc27a952aa90f9d | refs/heads/master | 2020-07-05T19:32:38.824051 | 2019-10-25T04:04:43 | 2019-10-25T04:04:43 | 202,748,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from time import time
from tqdm import tqdm
import tensorflow_datasets as tfds
from deep_cloud.problems.partnet import PartnetProblem
from deep_cloud.ops.np_utils.tree_utils import pykd
problem = PartnetProblem()
warm_up = 5
benchmark = 10
total = warm_up + benchmark
tree_impl = pykd.KDTree
all_coords = []
for coords, _ in tqdm(tfds.as_numpy(
problem.get_base_dataset('validation').take(total)),
total=total,
desc='getting base data...'):
tree = tree_impl(coords)
dists, indices = tree.query(tree.data, 2, return_distance=True)
del indices
scale = np.mean(dists[:, 1])
coords *= 2 / scale
all_coords.append(coords)
def run_fn(f, data, name):
    """Benchmark ``f`` over ``data``: untimed warm-up calls, then timed runs.

    Relies on the module globals ``warm_up``, ``total`` and ``benchmark``
    (total == warm_up + benchmark); ``name`` only labels the progress bars.
    """
    # Warm-up passes are excluded from the timing below.
    for i in tqdm(range(warm_up), desc='warming up {}'.format(name)):
        f(data[i])
    t = time()
    for i in tqdm(range(warm_up, total), desc='benchmarking {}'.format(name)):
        f(data[i])
    dt = time() - t
    print('{} runs took {} ms, {} ms / run'.format(benchmark, dt * 1000,
                                                   dt * 1000 / benchmark))
trees = [tree_impl(c) for c in all_coords]
def query_tree(tree):
    # Fixed-radius neighbourhood query over every point in the tree; the
    # result is discarded — only the runtime matters for the benchmark.
    tree.query_ball_point(tree.data, 4, approx_neighbors=16)
run_fn(tree_impl, all_coords, 'just tree')
run_fn(query_tree, trees, 'just query')
run_fn(lambda c: query_tree(tree_impl(c)), all_coords, 'compute both')
| [
"thedomjack@gmail.com"
] | thedomjack@gmail.com |
b273b59111c729a742b4aba94c9189dbef82690c | b16bc512603cbe3bdc5a56586cfc9147fe5fb3f6 | /venv/bin/rst2latex.py | 944a30a66b8d95f943a166fadb0372224d4a4a08 | [] | no_license | hoang-ho/TechTogether | caa565b14165c7b0889bd4232098e16a0137ba67 | fa4ca8375ab00d1791d2fce02384503eff5df7e0 | refs/heads/master | 2020-05-01T08:24:22.561868 | 2019-05-13T06:55:46 | 2019-05-13T06:55:46 | 177,377,979 | 2 | 2 | null | 2019-05-13T06:55:47 | 2019-03-24T06:15:31 | Python | UTF-8 | Python | false | false | 829 | py | #!/Users/hoangho/TechTogether/TTB_Backup/venv/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| [
"hoangho@Hoangs-MacBook-Pro.local"
] | hoangho@Hoangs-MacBook-Pro.local |
1defbd42fd03ec6153cd47a3c7858fd4ba026f91 | 360ff148d658caf1736ae159954c928d2ce545f7 | /alembic/env.py | 019c1b1fff7cee38417508e2ad8a39313905a451 | [
"MIT"
] | permissive | beanjo55/KerbalStuff | 3305401122186a692a345a6a0a5fad63f8eb864c | 18e8c517b6f79c2839236a9507464ab0987f103e | refs/heads/master | 2021-01-18T11:11:04.293762 | 2016-02-17T12:10:21 | 2016-02-17T12:10:21 | 51,928,777 | 1 | 0 | null | 2016-02-17T14:34:06 | 2016-02-17T14:34:06 | null | UTF-8 | Python | false | false | 2,076 | py | from __future__ import with_statement
import os, sys
sys.path.append(os.getcwd())
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
from KerbalStuff import app
from KerbalStuff.objects import Base
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| [
"sir@cmpwn.com"
] | sir@cmpwn.com |
3800a32637fe1d6fdc6c62820da488f167181ae2 | 8f48d12b88048e424ebb0d72ca6dfab5cf12ae0f | /0001_0599/349.py | fd262223f7f0e12f87b1a1d0b8c093a9eedd4ba2 | [] | no_license | renjieliu/leetcode | e1caf13c18a8107ed9252588b339fb76bcb1b246 | 4668b64fcb9320b6c316d8608fc61911ce43b6c7 | refs/heads/master | 2023-03-18T18:16:06.187741 | 2023-03-14T20:31:59 | 2023-03-14T20:31:59 | 128,823,819 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | def intersection(nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
map = {}
output = []
for i in nums1:
map[i] = "a"
for j in nums2:
if map.get(j, "None")!="None":
map[j] = "b"
for k, v in map.items():
if v == "b":
output.append(k)
return output
print(intersection([1, 2, 2, 1], [2, 2,2,2,2,1]))
| [
"anlrj@qq.com"
] | anlrj@qq.com |
728e2b1d2f05db0e391e7186e865451f25c2215e | ca0ffc5606da190274569e3d6ced2543af187fa9 | /get_workday.py | 06f8c01b9e6d486f740ea889b72de0ec804429bc | [] | no_license | littlelienpeanut/TBrain_ETF_prediction | f3a3db045e5274bfca56e28e373e98fa31c1ce67 | c9f2d19e2f97a67cd923928c4b87ffc53c274704 | refs/heads/master | 2020-03-18T01:42:25.374235 | 2018-06-27T16:17:06 | 2018-06-27T16:17:06 | 134,155,671 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | import pandas as pd
import datetime
import csv
def main():
    """Extract each ETF's trading dates from the raw tetfp.csv dump.

    For every ETF code in ``fname`` the distinct dates on which the code
    appears are collected in first-seen order and written to
    ``../stock_workday/<code>_workday.csv`` under a single ``date`` column.

    Cleanup vs. the original: removed the unused ``data_v1`` dict, the
    redundant outer ``date_title`` initialisation and the dead
    ``if ...: pass / else`` branch; output is unchanged.
    """
    # Assumes the CSV has 代碼 (code) and 日期 (date) columns — confirm
    # against the TBrain dataset release.
    df = pd.read_csv('../TBrain_Round2_DataSet_20180518/tetfp.csv', encoding = 'utf8')
    fname = ['50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6201', '6203', '6204', '6208', '690', '692', '701', '713']
    total = len(df)
    for fe in fname:
        date_title = []  # distinct dates for this ETF, in order of appearance
        for row in range(total):
            print(str(row) + '/' + str(total))  # per-row progress output
            if int(df['代碼'][row]) == int(fe):
                if df['日期'][row] not in date_title:
                    date_title.append(df['日期'][row])
        with open('../stock_workday/' + fe + '_workday.csv', 'w', newline='') as fout:
            wr = csv.writer(fout)
            wr.writerow(['date'])
            for date in date_title:
                wr.writerow([date])
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | littlelienpeanut.noreply@github.com |
d211ed69c45c9616969fa79f815d55d5f3412cca | 9ea80adb9252c0e67e5e20c3ff9d6a08cf4a172d | /mxonline/apps/courses/migrations/0007_auto_20190517_1615.py | 574173276b261a61cc8699af045aca93332ac58e | [] | no_license | hfxjd9527/djangoweb | 11830fbbaab0d4986b7494c61ac23d7f19266b67 | 1d83c423755b357eb178cc4f384829082623d2e0 | refs/heads/master | 2022-12-10T02:20:43.569239 | 2019-06-04T14:31:18 | 2019-06-04T14:39:11 | 185,593,356 | 0 | 0 | null | 2022-12-08T00:46:58 | 2019-05-08T11:34:07 | HTML | UTF-8 | Python | false | false | 678 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-17 16:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11 makemigrations) schema migration.

    Adds ``Video.learn_times`` and alters ``Course.learn_times``; both are
    IntegerFields defaulting to 0 with a Chinese verbose_name
    ("study duration in minutes").
    """

    dependencies = [
        ('courses', '0006_video_url'),
    ]
    operations = [
        migrations.AddField(
            model_name='video',
            name='learn_times',
            field=models.IntegerField(default=0, verbose_name='学习时长(分钟数)'),
        ),
        migrations.AlterField(
            model_name='course',
            name='learn_times',
            field=models.IntegerField(default=0, verbose_name='学习时长(分钟数)'),
        ),
    ]
| [
"1725824530@qq.com"
] | 1725824530@qq.com |
350ed4b6348456ac8bf7a9bdab5919e1a7dec755 | 6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9 | /Algorithm/문제/수업/D-13t/AD/[TST] 책꽂이.py | 4c6bb04f51b13f2fef4ab0a48e707a342aa6520b | [] | no_license | hongyong3/TIL | 36d031c0da9e3e6db3eebb977bd3e12df00a849f | 7f1492128e957a78fc95b255f4f7f2978161e471 | refs/heads/master | 2023-08-19T09:16:03.231757 | 2023-08-18T09:38:47 | 2023-08-18T09:38:47 | 162,100,258 | 1 | 0 | null | 2023-02-11T00:52:32 | 2018-12-17T08:42:42 | Jupyter Notebook | UTF-8 | Python | false | false | 711 | py | import sys
sys.stdin = open("[TST] 책꽂이_input.txt", "r")
# NOTE: resolved an unresolved git merge conflict here. The HEAD branch held
# the complete, working solution; the other branch was an abandoned stub that
# only echoed the parsed input, so HEAD is kept.
def bookshelf(idx, sums):
    """Recursively try every subset of the remaining boards.

    idx: index of the next board to include/exclude.
    sums: height accumulated so far.
    Updates the global `minn` with the smallest non-negative overshoot
    (sums - B) over all subsets whose total is at least B.
    """
    global minn
    # Prune: this partial sum already overshoots at least as much as the best.
    if sums - B >= minn:
        return
    if idx == N:
        # Record only subsets that reach the target B and strictly improve.
        if 0 <= sums - B <= minn - 1:
            minn = sums - B
        return
    # Branch: skip the current board, then take it.
    bookshelf(idx + 1, sums)
    bookshelf(idx + 1, sums + data[idx])


T = int(input())
for test_case in range(T):
    # N boards, target shelf height B.
    N, B = map(int, input().split())
    data = [int(input()) for _ in range(N)]
    minn = float('inf')
    bookshelf(0, 0)
    # Minimal overshoot over the target height.
    print(minn)
| [
"chy66822495@gmail.com"
] | chy66822495@gmail.com |
def avg(List):
    """Return the arithmetic mean of the items in *List*.

    Each item is coerced to int first, so numeric strings are accepted.
    Raises ZeroDivisionError if *List* is empty.
    """
    # sum() over a generator replaces the manual accumulator loop.
    return sum(int(i) for i in List) / len(List)
# Read one line of space-separated numbers from stdin.
nums = input()
List = nums.split(' ')
# Mean of the parsed tokens; formatted to two decimals by the print below.
Res = avg(List)
print("{:0.2f}".format(Res)) | [
"saikiran.sirneni@gmail.com"
] | saikiran.sirneni@gmail.com |
efe190f572281fda60f87458816984aefd501e95 | 29783ede1402f93bee06cbc899c41a48911e1285 | /portrait/import/NameAlignImport2Neo.py | 0632ec363abc75e5d619cc814b7cfe7660d9a381 | [
"BSD-2-Clause"
] | permissive | Allen517/alignment | 389f3dd4ff4b2bd6785ecee009f46e589f3b07f5 | a0bfe29b229182634d85b8b383767e7eda8fc2af | refs/heads/master | 2021-01-15T17:59:26.106980 | 2019-01-28T08:18:04 | 2019-01-28T08:18:04 | 99,768,785 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,002 | py | # -*- coding:utf8 -*-
import sys
sys.path.append("../../")
from portrait.DB.GraphdbClient import GraphdbClient
from portrait.DB.MongodbClient import MongodbClient
from portrait.utils.GetConfig import GetConfig
from portrait.utils.utilFunction import unicode2utf8
import json
from portrait.utils.LogHandler import LogHandler
from py2neo import Node
import uuid
class NameAlignImport2Neo(object):
    """Imports Douban/Weibo user-alignment data into a Neo4j graph.

    Reads user records from two MongoDB collections (Douban and Weibo),
    links aligned accounts with ALIGN relations and attaches both to a
    synthetic User node via HAS relations.
    """

    def __init__(self):
        # Connection settings come from the shared config object.
        self.config = GetConfig()
        self.graphdb = GraphdbClient()
        self.graphdb.setDatabase(self.config.graphdb_host, self.config.graphdb_port, \
                self.config.graphdb_user, self.config.graphdb_password)
        self.doubandb = MongodbClient()
        self.doubandb.setDatabase(self.config.doubandb_host, self.config.doubandb_port, \
                self.config.doubandb_name, self.config.doubandb_tab)
        self.weibodb = MongodbClient()
        self.weibodb.setDatabase(self.config.weibodb_host, self.config.weibodb_port, \
                self.config.weibodb_name, self.config.weibodb_tab)
        self.logger = LogHandler('name_align_import2neo')

    # Decorator: wraps a storage method in a py2neo transaction.
    # NOTE(review): `wrapper` does not return func's result, so any return
    # value of the wrapped method is silently dropped — confirm intended.
    def graphdb_transaction(func):
        def wrapper(self, douban_uid_set, weibo_uid_set):
            graphdb_tx = self.graphdb.graph.begin()
            func(self, douban_uid_set, weibo_uid_set)
            graphdb_tx.commit()
        return wrapper

    @graphdb_transaction
    def __relation_data_storage(self, douban_uid_set, weibo_uid_set):
        """Store one batch of douban->weibo alignments into the graph DB.

        douban_uid_set[k] aligns with every uid in weibo_uid_set[k].
        """
        if len(douban_uid_set)!=len(weibo_uid_set):
            self.logger.warning(u'The length of douban_uid_set and weib_uid_set is not equal. \
                The processed batch is skipped.')
            self.logger.warning(douban_uid_set)
            return
        for k in range(len(douban_uid_set)):
            douban_uid = douban_uid_set[k]
            weibo_uids = weibo_uid_set[k]
            douban_info = self.doubandb.get({'uid':douban_uid}) # get user info from doubandb
            weibo_infos = self.weibodb.search('uid', weibo_uids, '$in') # get user infos from weibodb
            if '_id' in douban_info: # remove automatically generated key '_id' with type of ObjectId
                douban_info.pop('_id')
            # set and store graph node of douban
            user_graph_node = None
            if 'id' in douban_info:
                douban_grpah_node = self.graphdb.insert_or_update_node('Douban', douban_info['id'], douban_info)
                # use existed user node or generate new user node in graphdb
                if not user_graph_node:
                    user_graph_node = self.graphdb.find_node_by_rel('Douban', {'id':"='{}'".format(\
                        douban_grpah_node['id'])}, 'HAS')
                    if user_graph_node:
                        user_graph_node = user_graph_node[0]
                if not user_graph_node:
                    # No owning User node yet: mint one with a fresh uuid.
                    user_graph_node = self.graphdb.insert_or_update_node('User', uuid.uuid1().get_hex())
                self.graphdb.insert_or_update_relation('HAS', user_graph_node, douban_grpah_node)
            # set and store graph node of weibo
            # NOTE(review): `douban_grpah_node` below is only bound when
            # 'id' was present in douban_info — otherwise this raises
            # NameError (or reuses a stale node from a prior iteration).
            if weibo_infos:
                for weibo_info in weibo_infos:
                    if 'uid' in weibo_info:
                        weibo_graph_node = self.graphdb.insert_or_update_node('Weibo', \
                            weibo_info['uid'], weibo_info)
                        # store relationship in neo4j
                        self.graphdb.insert_or_update_relation('ALIGN', douban_grpah_node, \
                            weibo_graph_node, {'ID':1.})
                        # use existed user node or generate new user node in graphdb
                        if not user_graph_node:
                            user_graph_node = self.graphdb.find_node_by_rel('Weibo', {'uid':"='{}'".format(\
                                douban_grpah_node['uid'])}, 'HAS')
                        if not user_graph_node:
                            user_graph_node = self.graphdb.insert_or_update_node('User', uuid.uuid1().get_hex())
                        self.graphdb.insert_or_update_relation('HAS', user_graph_node, weibo_graph_node)

    def storeDoubanName(self, file_name, batch_proc_num):
        """Dump selected Douban user fields to *file_name*, batch by batch."""
        # NOTE(review): mode 'aw' is non-standard ('a' would suffice) — confirm.
        with open(file_name, 'aw') as wrtF:
            skip_num = 0
            while(True):
                query_res = self.doubandb.getAll(batch_proc_num, skip_num)
                query_num = 0
                vals = list()
                for douban_res in query_res:
                    query_num += 1
                    if 'id' in douban_res:
                        # NOTE(review): "douban_{}".format() is missing its
                        # argument — this always produces "douban_"; looks
                        # like it should be .format(douban_res['id']).
                        graph_res = self.graphdb.find_node_by_id("Douban", "douban_{}".format())
                        if graph_res:
                            continue
                    if 'uid' in douban_res and 'name' in douban_res \
                        and 'desc' in douban_res and 'loc_name' in douban_res:
                        vals.append({'uid': douban_res['uid'], 'name':douban_res['name'], \
                            'desc':douban_res['desc'], 'loc_name':douban_res['loc_name']})
                if not query_num:
                    break
                for v in vals:
                    wrtF.write(json.dumps(v, ensure_ascii=False).decode('utf8')+'\t')
                # NOTE(review): '%' binds tighter than '+', so this is
                # (str % skip_num) + query_num, i.e. str + int -> TypeError.
                # Probably meant % (skip_num + query_num).
                self.logger.info('已存储%d条豆瓣数据至本地'%skip_num+query_num)
                skip_num += batch_proc_num

    def relation_data_finder(self, batch_proc_num):
        """Scan weibo records, then push alignment relations in batches."""
        skip_num = 0
        while(True):
            # 1. get weibo data from mongo
            weibo_query_res = self.weibodb.getAll(batch_proc_num, skip_num)
            query_num = 0
            for weibo_res in weibo_query_res:
                query_num += 1
                # NOTE(review): weibo_res_name is assigned but never used.
                weibo_res_name = weibo_res['nick_name']
            if not query_num: # no results
                break
            skip_num += batch_proc_num
        # 1. get relationships
        # NOTE(review): __get_rel is not defined in this class as shown here —
        # confirm it exists elsewhere, otherwise this raises AttributeError.
        rels = self.__get_rel()
        # 2.1 initialization
        proc_num = 0
        douban_uid_set = tuple()
        weibo_uid_set = list()
        # 2.2 start to process relationships
        rels = self.__get_rel()
        for rel in rels:
            proc_num += 1
            # 2.3 if processing the max of batch size, find user infos from mongodb
            if proc_num%batch_proc_num==0:
                self.__relation_data_storage(douban_uid_set, weibo_uid_set)
                self.logger.info(u'Already processing %d alignment records'%proc_num)
                douban_uid_set = tuple()
                weibo_uid_set = list()
            # 2.2 fetch douban_uid and weibo_uids from current relationship info
            douban_uid = ""
            weibo_uids = tuple()
            if "doubanId" in rel:
                douban_uid = rel["doubanId"]
            if "weiboIds" in rel:
                for weibo_id_info in rel["weiboIds"]:
                    if "uid" in weibo_id_info:
                        weibo_uids += unicode2utf8(weibo_id_info['uid']),
            douban_uid_set += unicode2utf8(douban_uid),
            weibo_uid_set.append(weibo_uids)
        # Flush the final partial batch.
        self.__relation_data_storage(douban_uid_set, weibo_uid_set)
        self.logger.info(u'Done! Already processing %d alignment records'%proc_num)
# Script entry point: batch-export Douban user records to a local file.
if __name__=='__main__':
    data2neo = NameAlignImport2Neo()
data2neo.storeDoubanName('douban_tmp', 10) | [
"wangyongqing.casia@gmail.com"
] | wangyongqing.casia@gmail.com |
class Solution(object):
    def subsets(self, nums):
        """Return the power set of *nums*, elements taken in sorted order."""
        nums.sort()
        power_set = [[]]
        for value in nums:
            extended = []
            for subset in power_set:
                extended.append(subset + [value])
            power_set.extend(extended)
        return power_set
| [
"dasoriyarayan@gmail.com"
] | dasoriyarayan@gmail.com |
ca8241aedb354d8af933ca4f71388f2d3f4e7420 | 27cb9cc771ffa02c4f7e12dcd4688e311c63aace | /fairseq/modules/sinusoidal_positional_embedding.py | 81324965f7c7d3088d215e3e147ece7b845e74d0 | [
"MIT"
] | permissive | periclesmiranda/TSPNet | 78aee61a4e4497ae82b1bb6731a6edd6230720cd | 8f71315486c78b540382ef6420eab5441333bcda | refs/heads/main | 2023-07-19T16:06:48.169045 | 2021-09-10T15:08:36 | 2021-09-10T15:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,985 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Optional
import torch
import torch.onnx.operators
from fairseq import utils
from torch import Tensor, nn
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.

    Padding symbols are ignored.
    """

    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        # Precompute a table of `init_size` embeddings; grown lazily in forward().
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size, embedding_dim, padding_idx
        )
        self.onnx_trace = False
        # Dummy buffer used only to carry the module's device/dtype for `.to()`.
        self.register_buffer("_float_tensor", torch.FloatTensor(1))
        self.max_positions = int(1e5)

    def prepare_for_onnx_export_(self):
        # Switch forward() to the ONNX-traceable code paths.
        self.onnx_trace = True

    @staticmethod
    def get_embedding(
        num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
    ):
        """Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        # Geometric progression of wavelengths: exp(-log(10000) * i / (half_dim-1)).
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        # Outer product positions x frequencies -> (num_embeddings, half_dim).
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
            1
        ) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
            num_embeddings, -1
        )
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            # Padding positions get the all-zero embedding.
            emb[padding_idx, :] = 0
        return emb

    def forward(
        self,
        input,
        incremental_state: Optional[Any] = None,
        timestep: Optional[Tensor] = None,
        positions: Optional[Any] = None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        bspair = torch.onnx.operators.shape_as_tensor(input)
        bsz, seq_len = bspair[0], bspair[1]
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos, self.embedding_dim, self.padding_idx
            )
        # Move the table to the module's current device/dtype.
        self.weights = self.weights.to(self._float_tensor)

        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
            if self.onnx_trace:
                return (
                    self.weights.index_select(index=self.padding_idx + pos, dim=0)
                    .unsqueeze(1)
                    .repeat(bsz, 1, 1)
                )
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)

        # Full-sequence path: compute positions (padding-aware) and gather rows.
        positions = utils.make_positions(
            input, self.padding_idx, onnx_trace=self.onnx_trace
        )
        if self.onnx_trace:
            flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
            embedding_shape = torch.cat(
                (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
            )
            embeddings = torch.onnx.operators.reshape_from_tensor_shape(
                flat_embeddings, embedding_shape
            )
            return embeddings
        return (
            self.weights.index_select(0, positions.view(-1))
            .view(bsz, seq_len, -1)
            .detach()
        )
| [
"chenchen.xu@anu.edu.au"
] | chenchen.xu@anu.edu.au |
48405bbbb1645e364d550e79258965c7757dafd6 | e7290064b5df4731167bab10606f451b446a21f7 | /rllib/execution/buffers/mixin_replay_buffer.py | bf23abdf6c108b7dd0b79e05dc5cd1dbf09d6844 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | sven1977/ray | dce9f6fa114741837341f14aef0a8c64c442aba6 | b73a496af19bce627a611e7af2cb02a3c5d99684 | refs/heads/master | 2023-09-02T00:57:47.167794 | 2023-08-17T09:33:04 | 2023-08-17T09:33:04 | 229,269,728 | 2 | 5 | Apache-2.0 | 2023-07-29T07:08:41 | 2019-12-20T13:27:01 | Python | UTF-8 | Python | false | false | 7,564 | py | import collections
import platform
import random
from typing import Optional
from ray.util.timer import _Timer
from ray.rllib.execution.replay_ops import SimpleReplayBuffer
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, concat_samples
from ray.rllib.utils.deprecation import Deprecated
from ray.rllib.utils.replay_buffers.multi_agent_replay_buffer import ReplayMode
from ray.rllib.utils.replay_buffers.replay_buffer import _ALL_POLICIES
from ray.rllib.utils.typing import PolicyID, SampleBatchType
class MixInMultiAgentReplayBuffer:
    """This buffer adds replayed samples to a stream of new experiences.

    - Any newly added batch (`add()`) is immediately returned upon
    the next `replay` call (close to on-policy) as well as being moved
    into the buffer.
    - Additionally, a certain number of old samples is mixed into the
    returned sample according to a given "replay ratio".
    - If >1 calls to `add()` are made without any `replay()` calls
    in between, all newly added batches are returned (plus some older samples
    according to the "replay ratio").

    Examples:
        >>> from ray.rllib.execution.replay_buffer import MixInMultiAgentReplayBuffer
        # replay ratio 0.66 (2/3 replayed, 1/3 new samples):
        >>> buffer = MixInMultiAgentReplayBuffer(capacity=100, # doctest: +SKIP
        ...                                      replay_ratio=0.66) # doctest: +SKIP
        >>> A, B, C, D = ... # doctest: +SKIP
        >>> buffer.add(A) # doctest: +SKIP
        >>> buffer.add(B) # doctest: +SKIP
        >>> buffer.replay() # doctest: +SKIP
        [A, B, B]
        >>> buffer.add(C) # doctest: +SKIP
        >>> buffer.replay() # doctest: +SKIP
        [C, A, B]
        >>> # or: [C, A, A] or [C, B, B], but always C as it
        >>> # is the newest sample
        >>> buffer.add(D) # doctest: +SKIP
        >>> buffer.replay() # doctest: +SKIP
        [D, A, C]
        >>> # replay proportion 0.0 -> replay disabled:
        >>> from ray.rllib.execution import MixInReplay
        >>> buffer = MixInReplay(capacity=100, replay_ratio=0.0) # doctest: +SKIP
        >>> buffer.add(A) # doctest: +SKIP
        >>> buffer.replay() # doctest: +SKIP
        [A]
        >>> buffer.add(B) # doctest: +SKIP
        >>> buffer.replay() # doctest: +SKIP
        [B]
    """

    def __init__(
        self,
        capacity: int,
        replay_ratio: float,
        replay_mode: ReplayMode = ReplayMode.INDEPENDENT,
    ):
        """Initializes MixInReplay instance.

        Args:
            capacity: Number of batches to store in total.
            replay_ratio: Ratio of replayed samples in the returned
                batches. E.g. a ratio of 0.0 means only return new samples
                (no replay), a ratio of 0.5 means always return newest sample
                plus one old one (1:1), a ratio of 0.66 means always return
                the newest sample plus 2 old (replayed) ones (1:2), etc...
        """
        self.capacity = capacity
        self.replay_ratio = replay_ratio
        self.replay_proportion = None
        if self.replay_ratio != 1.0:
            # Odds of old vs new: ratio / (1 - ratio), e.g. 0.66 -> ~2 old per new.
            self.replay_proportion = self.replay_ratio / (1.0 - self.replay_ratio)

        if replay_mode in ["lockstep", ReplayMode.LOCKSTEP]:
            self.replay_mode = ReplayMode.LOCKSTEP
        elif replay_mode in ["independent", ReplayMode.INDEPENDENT]:
            self.replay_mode = ReplayMode.INDEPENDENT
        else:
            raise ValueError("Unsupported replay mode: {}".format(replay_mode))

        # Factory for per-policy buffers (used by the defaultdict below).
        def new_buffer():
            return SimpleReplayBuffer(num_slots=capacity)

        self.replay_buffers = collections.defaultdict(new_buffer)

        # Metrics.
        self.add_batch_timer = _Timer()
        self.replay_timer = _Timer()
        self.update_priorities_timer = _Timer()

        # Added timesteps over lifetime.
        self.num_added = 0

        # Last added batch(es).
        self.last_added_batches = collections.defaultdict(list)

    def add(self, batch: SampleBatchType) -> None:
        """Adds a batch to the appropriate policy's replay buffer.

        Turns the batch into a MultiAgentBatch of the DEFAULT_POLICY_ID if
        it is not a MultiAgentBatch. Subsequently adds the individual policy
        batches to the storage.

        Args:
            batch: The batch to be added.
        """
        # Make a copy so the replay buffer doesn't pin plasma memory.
        batch = batch.copy()
        batch = batch.as_multi_agent()

        with self.add_batch_timer:
            if self.replay_mode == ReplayMode.LOCKSTEP:
                # Lockstep mode: Store under _ALL_POLICIES key (we will always
                # only sample from all policies at the same time).
                # This means storing a MultiAgentBatch to the underlying buffer
                self.replay_buffers[_ALL_POLICIES].add_batch(batch)
                self.last_added_batches[_ALL_POLICIES].append(batch)
            else:
                # Store independent SampleBatches
                for policy_id, sample_batch in batch.policy_batches.items():
                    self.replay_buffers[policy_id].add_batch(sample_batch)
                    self.last_added_batches[policy_id].append(sample_batch)
        self.num_added += batch.count

    def replay(
        self, policy_id: PolicyID = DEFAULT_POLICY_ID
    ) -> Optional[SampleBatchType]:
        if self.replay_mode == ReplayMode.LOCKSTEP and policy_id != _ALL_POLICIES:
            raise ValueError(
                "Trying to sample from single policy's buffer in lockstep "
                "mode. In lockstep mode, all policies' experiences are "
                "sampled from a single replay buffer which is accessed "
                "with the policy id `{}`".format(_ALL_POLICIES)
            )

        buffer = self.replay_buffers[policy_id]
        # Return None, if:
        # - Buffer empty or
        # - `replay_ratio` < 1.0 (new samples required in returned batch)
        #   and no new samples to mix with replayed ones.
        if len(buffer) == 0 or (
            len(self.last_added_batches[policy_id]) == 0 and self.replay_ratio < 1.0
        ):
            return None

        # Mix buffer's last added batches with older replayed batches.
        with self.replay_timer:
            # Drain the "fresh" batches; they are always part of the output.
            output_batches = self.last_added_batches[policy_id]
            self.last_added_batches[policy_id] = []

            # No replay desired -> Return here.
            if self.replay_ratio == 0.0:
                return concat_samples(output_batches)
            # Only replay desired -> Return a (replayed) sample from the
            # buffer.
            elif self.replay_ratio == 1.0:
                return buffer.replay()

            # Replay ratio = old / [old + new]
            # Replay proportion: old / new
            # Stochastically append ~num_new * proportion replayed batches.
            num_new = len(output_batches)
            replay_proportion = self.replay_proportion
            while random.random() < num_new * replay_proportion:
                replay_proportion -= 1
                output_batches.append(buffer.replay())
            return concat_samples(output_batches)

    def get_host(self) -> str:
        """Returns the computer's network name.

        Returns:
            The computer's networks name or an empty string, if the network
            name could not be determined.
        """
        return platform.node()

    @Deprecated(new="MixInMultiAgentReplayBuffer.add()", error=False)
    def add_batch(self, *args, **kwargs):
        # Backwards-compat alias for add().
        return self.add(*args, **kwargs)
| [
"noreply@github.com"
] | sven1977.noreply@github.com |
281437822c00a3b68c1c21b7d258fc68af90bd8c | 99f222d31e66da026cd284c390ef487d6e8a0270 | /core/experiments/plot_results_split_and_fit.py | 3dad50caee5489c2a8d9a8fe227e425812bff7bd | [] | no_license | dallascard/textile | 0e831b56978654f820de47f5145b7aabab48154e | 814ae148a0d7ca2ab47dd07c51ca42835717b9f2 | refs/heads/master | 2021-01-23T02:39:41.827570 | 2018-03-28T04:55:45 | 2018-03-28T04:55:45 | 86,009,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | import os
import re
from optparse import OptionParser
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..util import file_handling as fh
def main():
    """Aggregate MAE columns from several CSV result files.

    Optionally filters the files to the most similar/different and most
    balanced/unbalanced runs (judged from each file's 'train' row), then
    prints mean/std per result row and optionally writes CSV outputs.
    """
    usage = "%prog csv_results_files"
    parser = OptionParser(usage=usage)
    parser.add_option('--prefix', dest='prefix', default=None,
                      help='Output prefix (optional): default=%default')
    parser.add_option('--similar', action="store_true", dest="similar", default=False,
                      help='Only use the most similar examples: default=%default')
    parser.add_option('--different', action="store_true", dest="different", default=False,
                      help='Only use the most different examples: default=%default')
    parser.add_option('--balanced', action="store_true", dest="balanced", default=False,
                      help='Only use the most balanced examples: default=%default')
    parser.add_option('--unbalanced', action="store_true", dest="unbalanced", default=False,
                      help='Only use the most unbalanced examples: default=%default')

    (options, args) = parser.parse_args()
    files = args
    n_files = len(files)

    use_most_similar = options.similar
    use_least_similar = options.different
    use_balanced = options.balanced
    use_unbalanced = options.unbalanced
    output = options.prefix

    rows = ['train', 'CC', 'PCC', 'ACC_internal', 'MS_internal', 'PCC_platt2']
    # values[row][n_train] -> list of MAEs; only consumed by the disabled
    # plotting block at the bottom of this function.
    values = {}
    for row in rows:
        values[row] = {}

    df = None
    mae_values = None
    train_estimates = []
    train_maes = []
    for f_i, f in enumerate(files):
        print(f)
        #comp = re.sub('_2011', '_cshift_2011', f)
        #if not os.path.exists(comp):
        #    print("Can't find %s" % comp)
        # NOTE(review): n_files is incremented per file but only read once,
        # on the first iteration, to size mae_values (columns == len(files)).
        n_files += 1
        df_f = fh.read_csv_to_df(f)
        n_rows, n_cols = df_f.shape
        if mae_values is None:
            df = df_f
            mae_values = np.zeros([n_rows, n_files-1])
        mae_values[:, f_i] = df_f['MAE'].values
        train_estimates.append(df_f.loc['train', 'estimate'])
        train_maes.append(df_f.loc['train', 'MAE'])
        n_train = int(df_f.loc['train', 'N'])
        if n_train not in values['CC']:
            for row in rows:
                values[row][n_train] = []
        for row in rows:
            values[row][n_train].append(df_f.loc[row, 'MAE'])

    print("%d files" % len(files))
    df = pd.DataFrame(mae_values, index=df.index)

    # NOTE(review): train_maes is a plain list; `list < np.float64` only works
    # through NumPy's reflected comparison (returning a bool array) — consider
    # np.array(train_maes) for clarity.
    most_similar = train_maes < np.mean(train_maes)
    least_similar = train_maes > np.mean(train_maes)

    # Distance of the train estimate from 0.5 (perfect balance).
    train_unalancedness = np.abs(np.array(train_estimates) - 0.5)
    most_balanced = train_unalancedness < np.mean(train_unalancedness)
    least_balanced = train_unalancedness > np.mean(train_unalancedness)

    # Boolean column mask, ANDed (via *) with each requested filter.
    selector = np.array(np.ones(len(most_similar)), dtype=bool)
    if use_most_similar:
        selector *= most_similar
    if use_least_similar:
        selector *= least_similar
    if use_balanced:
        selector *= most_balanced
    if use_unbalanced:
        selector *= least_balanced

    df = pd.DataFrame(df.values[:, selector], index=df.index)
    print(df.mean(axis=1))
    print(df.std(axis=1))

    if output is not None:
        df.to_csv(output + '.csv')
        df.mean(axis=1).to_csv(output + '_mean.csv')

    # NOTE(review): the triple-quoted block below is disabled plotting code
    # kept as a no-op string literal.
    """
    cmap = plt.get_cmap('jet')
    colors = cmap(np.linspace(0, 1.0, len(rows)))

    fig, ax = plt.subplots()
    for r_i, row in enumerate(rows):
        means = []
        groups = list(values[row].keys())
        groups.sort()
        for group in groups:
            points = values[row][group]
            n_points = len(points)
            ax.scatter(np.ones(n_points)*group + r_i*8, points, color=colors[r_i], s=5, alpha=0.5)
            means.append(np.mean(points))
        if row == 'train':
            ax.plot(groups, means, linestyle='dashed', color=colors[r_i], label=row, alpha=0.5)
        else:
            ax.plot(groups, means, color=colors[r_i], label=row, alpha=0.5)
    ax.legend()
    if output is not None:
        plt.savefig(output + '.pdf', bbox_inches='tight')
    """
# Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
| [
"dcard@andrew.cmu.edu"
] | dcard@andrew.cmu.edu |
79c2f4eaae72715bc279a2eeed0138317f74f449 | 69bbe2729b178de19938d2be17fff29f99d67f6d | /question-type-find-hum-classifier-builder.py | 4711b71070d861ac64c061c854991d4bd813319d | [] | no_license | imclab/QuestionTypeClassifier | 0b6b51e2e9a85fdb0f61e0f814bca63147fe8bd7 | 20c25dda8ba0b38c3f74aa2914484380f4dd9394 | refs/heads/master | 2021-01-22T05:10:02.025554 | 2014-01-11T19:50:18 | 2014-01-11T19:50:18 | 17,212,625 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,311 | py | #!/usr/bin/env python
"""
done in 2539.012s
Best score: 0.936
Best parameters set:
clf__alpha: 0.0001
clf__n_iter: 80
clf__penalty: 'elasticnet'
tfidf__norm: 'l2'
tfidf__use_idf: False
vect__max_df: 0.75
vect__max_features: None
vect__ngram_range: (1, 2)
vect__stop_words: None
"""
__author__ = 'gavin hackeling'
__email__ = 'gavinhackeling@gmail.com'
import os
from time import time
import pickle
from pprint import pprint
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
def grid_search():
    """Exhaustive hyper-parameter search over a text-classification pipeline.

    Loads the fine/HUM corpus from a hard-coded local path, builds a
    CountVectorizer -> TfidfTransformer -> SGDClassifier pipeline, runs
    GridSearchCV over the parameter grid below and prints the best
    score/parameters. (Python 2 syntax: bare `print` statements.)
    """
    # NOTE(review): hard-coded absolute path; the open() handle is never closed.
    os.chdir('/home/gavin/PycharmProjects/question-type-classifier/corpora/')
    stop_words = [l.strip() for l in open('stop-words.txt', 'rb')]
    categories = ['desc', 'gr', 'ind', 'title']
    train = load_files('fine/HUM', categories=categories, shuffle=True, random_state=42)
    X, y = train.data, train.target
    pipeline = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', SGDClassifier()),
    ])
    # Candidate values per pipeline step (step__param naming).
    parameters = {
        'vect__stop_words': ('english', stop_words, None),
        'vect__max_df': (0.5, 0.75, 1.0),
        'vect__max_features': (None, 5000, 10000, 50000),
        'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
        'tfidf__use_idf': (True, False),
        'tfidf__norm': ('l1', 'l2'),
        'clf__alpha': (0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001),
        'clf__penalty': ('l2', 'elasticnet'),
        'clf__n_iter': (10, 50, 80),
    }
    # n_jobs=-1: use all cores.
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
    t0 = time()
    print 'Performing grid search...'
    print 'pipeline:', [name for name, _ in pipeline.steps]
    print 'parameters:'
    pprint(parameters)
    grid_search.fit(X, y)
    print 'done in %0.3fs' % (time() - t0)
    print 'Best score: %0.3f' % grid_search.best_score_
    print 'Best parameters set:'
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print '\t%s: %r' % (param_name, best_parameters[param_name])
def build_model():
    """Train the pipeline with the best grid-search parameters and pickle it.

    Prints a held-out score from a 75/25 split, then refits on the full
    corpus and writes the fitted pipeline to 'fine-hum-classifier.p'.
    """
    os.chdir('/home/gavin/PycharmProjects/question-type-classifier/corpora/')
    categories = ['desc', 'gr', 'ind', 'title']
    train = load_files('fine/HUM', categories=categories, shuffle=True, random_state=42)
    X, y = train.data, train.target
    # Parameters taken from the grid_search() results (see module docstring).
    pipeline = Pipeline([
        ('vect', CountVectorizer(max_df=0.75, ngram_range=(1, 2), stop_words=None)),
        ('tfidf', TfidfTransformer(norm='l2', use_idf=False)),
        ('clf', SGDClassifier(n_iter=80, penalty='elasticnet', alpha=0.0001)),
    ])
    X_train, X_test, y_train, y_test = train_test_split(train.data, train.target, test_size=0.25, random_state=42)
    pipeline.fit(X_train, y_train)
    print 'classifier score:', pipeline.score(X_test, y_test)
    # Refit on the entire corpus before serializing.
    pipeline.fit(X, y)
    filehandler = open('fine-hum-classifier.p', 'wb')
    pickle.dump(pipeline, filehandler)
    filehandler.close()
# Run the hyper-parameter search by default; swap the comments to rebuild
# and pickle the final model instead.
if __name__ == '__main__':
    grid_search()
    #build_model()
| [
"gavinhackeling@gmail.com"
] | gavinhackeling@gmail.com |
beaf48337d505ec8a471b710b101c24259801287 | 14f4d045750f7cf45252838d625b2a761d5dee38 | /argo/test/test_io_k8s_api_core_v1_endpoint_port.py | ee2bad7e450dd854eee83b22b6081f6c99cc0309 | [] | no_license | nfillot/argo_client | cf8d7413d728edb4623de403e03d119fe3699ee9 | c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa | refs/heads/master | 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo
from models.io_k8s_api_core_v1_endpoint_port import IoK8sApiCoreV1EndpointPort # noqa: E501
from argo.rest import ApiException
class TestIoK8sApiCoreV1EndpointPort(unittest.TestCase):
    """IoK8sApiCoreV1EndpointPort unit test stubs

    NOTE(review): swagger-codegen generated scaffolding; the test body is
    still a placeholder (see the FIXME below).
    """

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testIoK8sApiCoreV1EndpointPort(self):
        """Test IoK8sApiCoreV1EndpointPort"""
        # FIXME: construct object with mandatory attributes with example values
        # model = argo.models.io_k8s_api_core_v1_endpoint_port.IoK8sApiCoreV1EndpointPort()  # noqa: E501
        pass
# Run the test stubs when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"nfillot@weborama.com"
] | nfillot@weborama.com |
5495e799345a5df9d390f7c1e4773d9ae425c11b | ea713f1ea60829898e457ef39693f1ea8d14047a | /workbase/ws_sc/ws_sc/spiders/bqg_spider.py | 67e574f4977ac9f206bb47bdee7532c091ed5757 | [] | no_license | freeflyfish/bqhr | 2ea7220569780c033536587591a40fb6fb82d394 | d6cc82697b843a83826ed278aede4117822a818d | refs/heads/master | 2020-04-30T15:03:55.205319 | 2018-03-12T06:58:59 | 2018-03-12T06:58:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | # -*- coding:utf-8 -*-
import time
import scrapy
from scrapy.http import Request
from selenium import webdriver
class DLDLSpider(scrapy.Spider):
    """Crawl biquge novel chapter pages and save each chapter locally.

    Starting from `start_urls`, `parse` extracts the chapter title and body
    text, writes them under `path`, then follows the "next chapter" link
    (the 38th <a href> in the page layout) and repeats.
    """

    name = 'bqg'
    allowed_domains = ["biquge.com"]
    start_urls = ["http://www.biquge.info/10_10218/5001527.html"]
    # SECURITY: removed commented-out account/password/bank-card values that
    # were previously embedded here — credentials must not live in source.
    # Output directory for downloaded chapters (trailing separator included).
    path = 'E:\\xiaoshuo\dldl\\'

    def parse(self, response):
        """Save the current chapter to disk and schedule the next page."""
        title = response.xpath('//h1/text()').extract_first()
        content_list = response.xpath('//div[@id="content"]/text()').extract()
        hrefs = response.xpath('//a/@href').extract()
        # The "next chapter" link sits at a fixed index in the page; guard
        # against shorter pages instead of raising IndexError.
        page_next = hrefs[37] if len(hrefs) > 37 else None
        if content_list:
            # Strip CR/LF and non-breaking spaces; one paragraph per line,
            # with a trailing newline (join beats += concatenation in a loop).
            lines = [x.replace('\r', '').replace('\n', '').replace('\xa0', '')
                     for x in content_list]
            con = '\n'.join(lines) + '\n'
            # NOTE(review): relies on the platform default text encoding — confirm.
            with open(self.path + title + '.txt', 'w') as f:
                f.write(con)  # `with` closes the file; no explicit close needed
        if page_next:
            yield Request(page_next, callback=self.parse, dont_filter=True)
"380784649@qq.com"
] | 380784649@qq.com |
d64db6766ff616af587c803676cd543d66ea5af3 | 9c50d2310d026583fc32720b2cf59f8a8679a3f1 | /base/checkconfig.py | 40d97e4741acaf64c4a85628067987f3206ca275 | [] | no_license | songhongbao/Ran | 6d69bfc93433bdcdfe01f9b542dd626bb188a00d | 942b767e2492283d0c3ade259261de17d2dee7ff | refs/heads/master | 2020-06-15T06:29:35.927142 | 2015-11-19T10:57:19 | 2015-11-19T10:57:19 | 33,342,851 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,604 | py | # -*- coding: utf-8 -*-
import re
import os
import sys
class Conf():
_config = dict()
_error_msg = ''
_task_propertys = ['progress', 'thread']
_task_list = []
#deal ran config
#ran config only support strict pattern: key=value
def _deal(self, line, num):
line = line.strip()
if len(line) == 0 or line[0] == '#':
return True
if len(line.split('=')) != 2:
return False
key, value = line.split('=')
key = key.strip()
value = value.strip()
self._config[key] = (value, num)
return True
#deal task config
#task config only support strict pattern: taskname.property=numbers
def _task_deal(self, config, key, value):
if len(key.split('.')) != 2:
return False
task_name, task_property = key.split('.')
#property need be in _task_propertys
if not task_property in self._task_propertys:
return False
#property need be numbers
if not re.match(r'^[1-9]\d*$', value):
return False
value = int(value)
#task need be in task folder
if not task_name in self._task_list:
return False
#all is ok, register to the config
if not config.get(task_name):
config[task_name] = dict()
config[task_name]['progress'] = 1
config[task_name]['thread'] = 1
config[task_name][task_property] = value
return config
#deal local config
#local config can support normal pattern: key1.key2.key3...keyn=valuel
def _local_deal(self, config, key, value):
if len(key) == 1:
config[key[0]] = value
else:
if not config.get(key[0]) or not isinstance(config[key[0]], dict):
config[key[0]] = dict()
config[key[0]] = self._local_deal(config[key[0]], key[1:], value)
return config
#deal error config
#error config value include error const, and error info
def _error_deal(self, config, key, value):
if len(value.split(':')) != 2:
return False
error_key, error_value = value.split(':')
error_key = error_key.strip()
error_value = error_value.strip()
config[error_key] = dict()
config[error_key]['num'] = key
config[error_key]['msg'] = error_value
return config
#init the task file name list
def _init_task_folder(self):
self._task_list = []
for task_name in os.listdir('task'):
if task_name[-3 : ] == '.py':
self._task_list.append(task_name[0 : -3])
#config check false, set the errors
def _set_error(self, value, line=0, name='ran'):
if line:
self._error_msg = name + '.config line ' + str(line) + ' error: ' + str(value)
else:
self._error_msg = 'check ' + name + '.config error:\n' + str(value)
#if config check false, you can get errors by the function
#the error info can be write in the ran log
def get_error(self):
return self._error_msg
#ran config check is complex
#Parses ran.config line-by-line via self._deal, then validates/normalizes
#each known setting into a fresh dict. Returns the dict, or False (with
#the error recorded via _set_error) on the first invalid entry.
def check_ran(self, lines):
self._config = dict()
num = 1
for line in lines:
if not self._deal(line, num):
self._set_error(line, num)
return False
num += 1
config = dict()
#set progress dir
config['dir'] = sys.path[0]
#task refresh: must be the literal string 'yes' or 'no'
value, num = self._config.get('config_refresh', ('no', 0))
if value != 'yes' and value != 'no':
self._set_error(value, num)
return False
config['config_refresh'] = False if value == 'no' else True
#task refresh time: positive integer (no leading zero)
value, num = self._config.get('config_refresh_time', ('60', 0))
if not re.match(r'^[1-9]\d*$', value):
self._set_error(value, num)
return False
config['config_refresh_time'] = int(value)
#socket_folder: absolute paths kept as-is, otherwise resolved under dir
value, num = self._config.get('socket_folder', ('tmp', 0))
if value.find('/') == 0:
config['socket_folder'] = value
else:
config['socket_folder'] = config['dir'] + '/' + value
if not os.path.exists(config['socket_folder']):
self._set_error(value + ' folder not exist', num)
return False
#socket_port
value, num = self._config.get('socket_port', ('7664', 0))
if not re.match(r'^[1-9]\d*$', value):
self._set_error(value, num)
return False
config['socket_port'] = int(value)
#log_file_folder: accepted without an existence check (see commented line)
value, num = self._config.get('log_file_folder', ('log', 0))
#if not os.path.exists(file_folder):
config['log_file_folder'] = value
#log_udp_host: not validated as a hostname/IP
value, num = self._config.get('log_udp_host', ('127.0.0.1', 0))
config['log_udp_host'] = value
#log udp port
value, num = self._config.get('log_udp_port', ('5202', 0))
if not re.match(r'^[1-9]\d*$', value):
self._set_error(value, num)
return False
config['log_udp_port'] = int(value)
return config
#task config check: parse task.config, then validate every entry against
#the task scripts found in the task folder. Python 2 (iteritems).
def check_task(self, lines):
self._config = dict()
num = 1
for line in lines:
if not self._deal(line, num):
self._set_error(line, num, 'task')
return False
num += 1
self._init_task_folder()
config = dict()
for key, value in self._config.iteritems():
#value is a (raw_value, line_number) pair recorded by _deal
if not self._task_deal(config, key, value[0]):
self._set_error(key + '=' + value[0], value[1], 'task')
return False
return config
#local config check: parse local.config, then expand dotted keys into a
#nested dict via _local_deal. Python 2 (iteritems).
def check_local(self, lines):
self._config = dict()
num = 1
for line in lines:
if not self._deal(line, num):
self._set_error(line, num, 'local')
return False
num += 1
config = dict()
for key, value in self._config.iteritems():
self._local_deal(config, key.split('.'), value[0])
return config
#error config check: parse error.config, then map every error constant to
#{'num': key, 'msg': message} via _error_deal. Python 2 (iteritems).
def check_error(self, lines):
    """Parse error.config lines into {CONST: {'num': key, 'msg': text}}.

    Returns the parsed dict, or False (with the failure recorded through
    _set_error) when a line fails the generic key=value parse.
    """
    self._config = dict()
    num = 1
    for line in lines:
        if not self._deal(line, num):
            # Bug fix: this method parses error.config, so report failures
            # against 'error' (was 'local', copy-pasted from check_local).
            self._set_error(line, num, 'error')
            return False
        num += 1
    config = dict()
    for key, value in self._config.iteritems():
        # NOTE(review): unlike check_task, a False return from _error_deal
        # (malformed "CONST: msg" value) is silently ignored here.
        self._error_deal(config, key, value[0])
    return config
"root@localhost.localdomain"
] | root@localhost.localdomain |
84f87302873f18b014bc11be8a870f868e346cd5 | bd36269a7d7780e526c6e700f396baf7fffcb224 | /ctech403/module_1/fillin-function.py | 6090f315bbbac4e8f1f02a84c217ba6ec8982fea | [] | no_license | ecornelldev/ctech400s | 7bb26d901bb9daae5c2d0f4f2eb8dabd9fdfe10e | 8e394620dc2f2597161cc3ac94b1b97424f13d1a | refs/heads/master | 2021-06-23T21:23:16.198084 | 2020-12-22T19:06:42 | 2020-12-22T19:06:42 | 161,530,338 | 0 | 0 | null | 2020-12-30T04:10:33 | 2018-12-12T18:43:17 | HTML | UTF-8 | Python | false | false | 1,113 | py | import random
# Story template: placeholders below are substituted by fill_in().
story = 'Yesterday, I ___VERB___ to the store to buy a ___NOUN___. But on my way, I ran into a ___ADJ___ ___NOUN___. I was very ___ADJ___. Then I remembered that I had a ___NOUN___ in my pocket. I ___VERB___ behind a ___ADJ___ ___NOUN___.'
# Placeholders (must match the markers embedded in the template above)
NOUN_PLACEHOLDER = '___NOUN___'
ADJECTIVE_PLACEHOLDER = '___ADJ___'
VERB_PLACEHOLDER = '___VERB___'
# Word lists to draw random substitutions from
NOUNS = ['cat', 'dog', 'zeppelin', 'boomerang', 'trombone']
ADJECTIVES = ['red', 'hunormous', 'intricate', 'merciless']
VERBS = ['vomited', 'catapulted', 'squeaked']
# Returns text with every instance of placeholder replaced by a random
# choice from word_list.
def fill_in(text, placeholder, word_list):
    """Replace every occurrence of *placeholder* in *text* with a word
    drawn at random from *word_list*, one occurrence at a time so each
    slot gets an independently chosen word."""
    result = text
    while placeholder in result:
        result = result.replace(placeholder, random.choice(word_list), 1)
    return result
# Perform substitutions for each list of words (order does not matter:
# each pass only touches its own placeholder).
story = fill_in(story, NOUN_PLACEHOLDER, NOUNS)
story = fill_in(story, ADJECTIVE_PLACEHOLDER, ADJECTIVES)
story = fill_in(story, VERB_PLACEHOLDER, VERBS)
# Output story with substitutions
print(story)
| [
"james.grimmelmann@cornell.edu"
] | james.grimmelmann@cornell.edu |
ce9e6a7de16b5cdfa571a0dee0fd2036c6646815 | b3a90c38c61579a3de26ca398cc354a2fedbd367 | /python_crash_course/chapter_11/test_name_function.py | 56ef5ee3f7bc39c57f6974724ef7e1a833e4d6a3 | [] | no_license | JennifferLockwood/python_learning | a815e05e124aab02de694e48ee405958bbed8aac | fa054bb84778f278247128266095e061e65126b0 | refs/heads/master | 2016-08-12T20:29:36.221498 | 2016-03-13T18:10:04 | 2016-03-13T18:10:04 | 49,471,663 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | import unittest
from name_function import get_formatted_name
class NamesTestCase(unittest.TestCase):
"""Tests for 'name_function.py'."""
def test_first_last_name(self):
"""Do names like 'Janis Joplin' work?"""
formatted_name = get_formatted_name('janis', 'joplin')
self.assertEqual(formatted_name, 'Janis Joplin')
def test_first_last_middle_name(self):
"""Do names like 'Wolfang Amadeus Mozart' work?"""
formatted_name = get_formatted_name('wolfang', 'mozart', 'amadeus')
self.assertEqual(formatted_name, 'Wolfang Amadeus Mozart')
# NOTE(review): unguarded unittest.main() also runs when this module is
# imported; consider wrapping it in `if __name__ == '__main__':`.
unittest.main()
| [
"jennifferlockwood@gmail.com"
] | jennifferlockwood@gmail.com |
5d2a5440ef001e2e281ac33bb7504dd9e176c1c6 | e463e169c6dcd5222dbba9c4c699f70b5eda3591 | /Chapter_8/private-critter.py | c28cb62c9baf64a7da396de3807110884329e945 | [] | no_license | mrwillbarnz/Python_FAB_Reimplementation | 1e00e3ecdcb6c9e3d671aae9dddf8aa475c01e8e | 5f02c6d3f392612fe17d97e648302ea2e3edf01c | refs/heads/master | 2022-04-12T05:47:11.577985 | 2020-03-01T17:37:40 | 2020-03-01T17:37:40 | 239,611,675 | 0 | 0 | null | 2020-02-16T19:28:19 | 2020-02-10T20:57:21 | Python | UTF-8 | Python | false | false | 705 | py | # Private Critter
# Demonstrates "private" (name-mangled) attributes and methods.
class Critter(object):
    """A virtual pet with one public and one name-mangled attribute."""

    def __init__(self, name, mood):
        print("A new critter has been born!")
        # Publicly readable attribute.
        self.name = name
        # Name-mangled attribute, stored as _Critter__mood.
        self.__mood = mood

    def talk(self):
        """Announce the critter's name and current mood."""
        print("\nI'm", self.name)
        print("Right now I feel", self.__mood, "\n")

    def __private_method(self):
        print("This is a private method.")

    def public_method(self):
        """Public entry point that delegates to the private method."""
        print("This is a public method.")
        self.__private_method()
# main: demo script — create a pet, exercise its public API, then wait
# for Enter so a double-clicked console window stays open.
crit = Critter(name = "Poochie", mood = "happy")
crit.talk()
crit.public_method()
input("\n\nPress the enter key to exit.")
| [
"willbarnard687@pm.me"
] | willbarnard687@pm.me |
725f3663fa177e21fa5168b1c0c8db6c8e9596a5 | b023dc288ead04ce930fc16034bf47752c0a86a4 | /projecteuler2.py | b6910e07d34234fb29796fe88b6d8b1d7d369e16 | [] | no_license | leezichanga/Project-euler-toyproblems | b98c747b9d2c61cde76e5ad223e66e559ca63a33 | 438a5b48cb42e357def68360598b8d1850128734 | refs/heads/master | 2020-03-11T19:45:33.145514 | 2018-04-19T13:03:48 | 2018-04-19T13:03:48 | 130,217,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | #Solution 1
# Project Euler 2: sum of even Fibonacci numbers below 4,000,000.
# Builds the Fibonacci list by scanning every i < 4e6 (O(n) but wasteful),
# then sums the even members.
# NOTE(review): indentation appears stripped in this dump, so whether the
# second loop nests inside the first cannot be confirmed from the text.
numbers = [1, 2]
total = 0
for i in range(4000000):
if i == numbers[-1] + numbers[-2]:
numbers.append(i)
for n in numbers:
if n % 2 == 0:
total += n
print(total)
#Solution 2 (HackerRank variant: t test cases, each with its own limit n,
#read from stdin; prints the even-Fibonacci sum below n for each case)
#!/bin/python3
import sys
t = int(input().strip())
for a0 in range(t):
fib_start =[1,2] #create the first 2 fibonnaci numbers
total = 0 #create default total value
n = int(input().strip())
while True: #while loop to append the next numbers in the sequence
fib_next = fib_start[-1] +fib_start[-2]
if fib_next < n:
fib_start.append(fib_next)
else:
break
for number in fib_start: #for loop to add the even digits
if number % 2 == 0:
total += number
print (total)
| [
"elizabbethichanga@yahoo.com"
] | elizabbethichanga@yahoo.com |
554c61067cbadc2773055a2c4062c1801556b3e4 | d652c5cd50abc59163288f67aabf511edf2ffc16 | /{{cookiecutter.package_name}}/{{cookiecutter.app_name}}/serializers/blog.py | 7a2777cb74b488f9a6b15e871cad3fd62c639f55 | [
"MIT"
] | permissive | sveetch/cookiecutter-sveetch-djangoapp | 2f883958a665a84423f9dcc0bbd794a67d91fb0e | 6770a00e5ed67702f61543c0495bc55dcebdc76a | refs/heads/master | 2023-04-03T18:05:59.380348 | 2023-03-17T16:26:15 | 2023-03-17T16:26:15 | 297,186,173 | 3 | 1 | null | 2020-10-12T00:52:41 | 2020-09-21T00:04:59 | null | UTF-8 | Python | false | false | 1,525 | py | from rest_framework import serializers
from ..models import Blog
class BlogIdField(serializers.PrimaryKeyRelatedField):
# Primary-key field whose candidate queryset is every Blog row;
# evaluated lazily per request rather than at import time.
def get_queryset(self):
return Blog.objects.all()
class BlogSerializer(serializers.HyperlinkedModelSerializer):
"""
Complete Blog representation for detail views and writing usage.
Exposes all model fields plus computed 'view_url' and 'article_count'.
"""
id = serializers.ReadOnlyField()
view_url = serializers.SerializerMethodField()
article_count = serializers.SerializerMethodField()
class Meta:
model = Blog
fields = '__all__'
extra_kwargs = {
"url": {
"view_name": "{{ cookiecutter.app_name }}:api-blog-detail"
},
}
def get_view_url(self, obj):
"""
Return the HTML detail view URL.
If a request is available in the serializer context this is an absolute
URL; otherwise the model's relative URL is returned as-is.
"""
url = obj.get_absolute_url()
request = self.context.get("request")
if request:
return request.build_absolute_uri(url)
return url
def get_article_count(self, obj):
"""
Return the number of articles related to this blog.
"""
return obj.article_set.count()
class BlogResumeSerializer(BlogSerializer):
"""
Slimmed-down Blog representation for nested lists (read-only usage);
reuses the parent's model and extra_kwargs but restricts the fields.
"""
class Meta:
model = BlogSerializer.Meta.model
fields = ["id", "url", "view_url", "title"]
extra_kwargs = BlogSerializer.Meta.extra_kwargs
| [
"sveetch@gmail.com"
] | sveetch@gmail.com |
9beb46105e59a68e3a054d35e6ff5164999cabc5 | 0f24c1e2df268a7c98314d5b3c6f8b5738f88ba9 | /test/test_addresses_api.py | 2b9cf39f54f0bd4d84f46319473ed53db7dbb64e | [
"MIT"
] | permissive | arberx/graphsense-python | b07be2854d4f6e763aacdad4045ae72c338bd4e2 | c0dafc97a04bc3dbf0caf08a981bb591bd1e430a | refs/heads/master | 2023-08-11T14:15:42.576434 | 2021-06-17T08:01:04 | 2021-06-17T08:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py | """
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.4.5
Generated by: https://openapi-generator.tech
"""
import unittest
import graphsense
from graphsense.api.addresses_api import AddressesApi # noqa: E501
# Generated stub suite: every test body is a placeholder `pass` awaiting a
# real implementation against the AddressesApi client.
class TestAddressesApi(unittest.TestCase):
"""AddressesApi unit test stubs"""
def setUp(self):
self.api = AddressesApi() # noqa: E501
def tearDown(self):
pass
def test_get_address_entity(self):
"""Test case for get_address_entity
Get an address with tags # noqa: E501
"""
pass
def test_get_address_with_tags(self):
"""Test case for get_address_with_tags
Get an address with tags # noqa: E501
"""
pass
def test_list_address_links(self):
"""Test case for list_address_links
Get transactions between two addresses # noqa: E501
"""
pass
def test_list_address_links_csv(self):
"""Test case for list_address_links_csv
Get transactions between two addresses as CSV # noqa: E501
"""
pass
def test_list_address_links_csv_eth(self):
"""Test case for list_address_links_csv_eth
Get transactions between two addresses as CSV # noqa: E501
"""
pass
def test_list_address_links_eth(self):
"""Test case for list_address_links_eth
Get transactions between two addresses # noqa: E501
"""
pass
def test_list_address_neighbors(self):
"""Test case for list_address_neighbors
Get an addresses' neighbors in the address graph # noqa: E501
"""
pass
def test_list_address_neighbors_csv(self):
"""Test case for list_address_neighbors_csv
Get an addresses' neighbors in the address graph as CSV # noqa: E501
"""
pass
def test_list_address_txs(self):
"""Test case for list_address_txs
Get all transactions an address has been involved in # noqa: E501
"""
pass
def test_list_address_txs_csv(self):
"""Test case for list_address_txs_csv
Get all transactions an address has been involved in as CSV # noqa: E501
"""
pass
def test_list_address_txs_csv_eth(self):
"""Test case for list_address_txs_csv_eth
Get all transactions an address has been involved in as CSV # noqa: E501
"""
pass
def test_list_address_txs_eth(self):
"""Test case for list_address_txs_eth
Get all transactions an address has been involved in # noqa: E501
"""
pass
def test_list_tags_by_address(self):
"""Test case for list_tags_by_address
Get attribution tags for a given address # noqa: E501
"""
pass
def test_list_tags_by_address_csv(self):
"""Test case for list_tags_by_address_csv
Get attribution tags for a given address # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"git@myrho.net"
] | git@myrho.net |
c3a959e4e06b55273a496c095e694aa6c1c774ca | 72f6f274a9e4937f99e61eebe14f9b2f301a83f5 | /utils/tokenizer.py | bf4279be1e7e24a5503d16496dfcd27c2bff72f0 | [] | no_license | studio-ousia/textent | e466f8ef4f6910a0f4270014fa29c18aa5f329e0 | 2a73ef2f6a0d29d4d1c1085a75fa0b7592bdd376 | refs/heads/master | 2021-03-22T04:45:57.582737 | 2018-06-03T07:18:28 | 2018-06-03T07:18:28 | 93,811,887 | 20 | 4 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | # -*- coding: utf-8 -*-
import re
class Token(object):
    """Immutable token: a surface string plus its (start, end) span."""

    __slots__ = ('_text', '_span')

    def __init__(self, text, span):
        self._text = text
        self._span = span

    @property
    def text(self):
        """The token's surface string."""
        return self._text

    @property
    def span(self):
        """(start, end) character offsets in the source text."""
        return self._span

    def __repr__(self):
        return '<Token %s>' % self.text.encode('utf-8')

    def __reduce__(self):
        # Explicit reduce keeps the slotted class picklable.
        return (self.__class__, (self.text, self.span))
class RegexpTokenizer(object):
    """Tokenizer that yields a Token for every match of a regex rule.

    Fix: the default pattern used a Python-2-only ``ur''`` literal, which
    is a SyntaxError on Python 3; for this pattern (no escapes affected by
    the ``u`` prefix) a plain raw string is byte-for-byte equivalent.
    """

    __slots__ = ('_rule',)

    def __init__(self, rule=r'[\w\d]+'):
        # re.UNICODE keeps \w/\d matching non-ASCII word characters.
        self._rule = re.compile(rule, re.UNICODE)

    def tokenize(self, text):
        """Return a list of Token objects, one per match of the rule."""
        spans = [o.span() for o in self._rule.finditer(text)]
        return [Token(text[s[0]:s[1]], s) for s in spans]
| [
"ikuya@ikuya.net"
] | ikuya@ikuya.net |
c42d55490407bfcfd3a591030db63cd5be9b2b58 | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /find-and-replace-in-string/find-and-replace-in-string.py | d3ee3f2ce5afaf66d400552e4e375febc9762f26 | [
"Apache-2.0"
] | permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | // https://leetcode.com/problems/find-and-replace-in-string
class Solution(object):
    def findReplaceString(self, S, indexes, sources, targets):
        """Apply all matching (index, source -> target) replacements to S.

        A replacement at index i fires only when S actually starts with
        its source string at position i; otherwise the original text at
        that position is kept unchanged.

        :type S: str
        :type indexes: List[int]
        :type sources: List[str]
        :type targets: List[str]
        :rtype: str
        """
        # Map each replacement index to its (source, target) pair.
        rules = {idx: (sources[k], targets[k]) for k, idx in enumerate(indexes)}
        pieces = []
        cursor = 0
        for idx in sorted(rules):
            src, dst = rules[idx]
            # Untouched text between the previous replacement and this one.
            pieces.append(S[cursor:idx])
            snippet = S[idx:idx + len(src)]
            pieces.append(dst if snippet == src else snippet)
            cursor = idx + len(src)
        pieces.append(S[cursor:])
        return ''.join(pieces)
"keshava.gowda@gmail.com"
] | keshava.gowda@gmail.com |
44f2dfba86e5e004678f934e9bfd00a8545929f3 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/domain/KoubeiMarketingCampaignBenefitQueryModel.py | d167d12a86faf113518b209e604ee0b9b496a368 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 937 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMarketingCampaignBenefitQueryModel(object):
    """Request model for querying a marketing-campaign benefit by id."""

    def __init__(self):
        self._benefit_id = None

    @property
    def benefit_id(self):
        return self._benefit_id

    @benefit_id.setter
    def benefit_id(self, value):
        self._benefit_id = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, skipping unset (falsy) fields."""
        params = dict()
        benefit = self.benefit_id
        if benefit:
            if hasattr(benefit, 'to_alipay_dict'):
                # Nested model objects serialize themselves.
                params['benefit_id'] = benefit.to_alipay_dict()
            else:
                params['benefit_id'] = benefit
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for empty input."""
        if not d:
            return None
        model = KoubeiMarketingCampaignBenefitQueryModel()
        if 'benefit_id' in d:
            model.benefit_id = d['benefit_id']
        return model
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
1d360fbb5e6d6248351227c7dbb7361fc5d36f9b | c71d332dd845036c21c9fd8f4f571f9209bf2672 | /Binary Tree Zingzag Level Order Traversal.py | 5573d8950e53f17add11faff31d2f61793ba6f3e | [] | no_license | diksha12p/DSA_Practice_Problems | 2884fd9e77094d9662cb8747744dd2ef563e25e4 | d56e3d07620d51871199f61ae82cff2bd75b4744 | refs/heads/master | 2023-01-20T15:31:37.824918 | 2020-11-29T21:37:12 | 2020-11-29T21:37:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | """
LC 103. Binary Tree Zigzag Level Order Traversal
Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right, then right to left for the next level and alternate between).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
from typing import List
class Node:
    """Plain binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def zigzagLevelOrder(self, root: 'Node') -> 'List[List[int]]':
        """Breadth-first traversal that records node values level by
        level, reversing the reading direction on every other level."""
        if not root:
            return []
        levels = []
        frontier = [root]
        left_to_right = True
        while frontier:
            values = [node.val for node in frontier if node is not None]
            levels.append(values if left_to_right else values[::-1])
            left_to_right = not left_to_right
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return levels
# Demo: build the sample tree [3, 9, 20, null, null, 15, 7] and print
# its zigzag level-order traversal ([[3], [20, 9], [15, 7]]).
root = Node(3)
root.left = Node(9)
root.right = Node(20)
# root.left.left = Node(7)
# root.left.right = Node(6)
root.right.left = Node(15)
root.right.right = Node(7)
sol = Solution()
print(sol.zigzagLevelOrder(root))
"noreply@github.com"
] | diksha12p.noreply@github.com |
551d529e69d4ddcf6ee18ff69888a85c3916d14c | 7e96ba20c25c6fb56af6ccd36b3b6d68df6a081c | /Kyle_Marienthal/DJANGO/TRAVEL_BUDDY_REDO/apps/travels_app/views.py | 39b2f550fcb8e27dacd385ab3fc16e1471ac4812 | [] | no_license | CodingDojoDallas/python_september_2017 | 9d8cd74131a809bc6b13b7f465594cf8b1e2fd75 | f9f2f7b39bf9c4fceda3df5dc7424164aa5d5df5 | refs/heads/master | 2021-01-23T08:52:22.899994 | 2017-10-30T17:00:55 | 2017-10-30T17:00:55 | 102,558,291 | 2 | 14 | null | 2018-01-13T05:28:34 | 2017-09-06T03:28:38 | Python | UTF-8 | Python | false | false | 1,573 | py | from django.shortcuts import render, redirect, reverse
from django.contrib import messages
from .models import Trip, User
# Create your views here.
# if request.method == "POST":
# context = {
# 'current_user' : current_user(request)
# }
# Push every validation error onto Django's messages framework so the
# next rendered template can flash them. Python 2 print statements.
def flash_errors(errors, request):
print '*****youre in the flash_errors method*****'
for error in errors:
messages.error(request, error)
# Resolve the logged-in User from the session; implicitly returns None
# when no 'user_id' is stored (callers must tolerate that).
def current_user(request):
print '*****youre in the current_user method*****'
if 'user_id' in request.session:
return User.objects.get(id=request.session['user_id'])
def dashboard(request):
print '***** youre in the travel dashboard method*****'
trips = Trip.objects.all()
other_users = {User.objects.all().exclude(id=current_user(request).id)}
context = {
'current_user' : current_user(request),
'trips' : trips,
'other_users' : other_users
}
return render(request, 'travels_app/dashboard.html', context)
# Render the "add trip" form page for the logged-in user.
def add_trip(request):
print '***** youre in the travel add_trip method*****'
context = {
'current_user' : current_user(request)
}
return render(request, 'travels_app/add_trip.html', context)
# Persist a new trip from the submitted form, then bounce back to the
# dashboard.
def create_trip(request):
print '***** youre in the travel create_trip method*****'
user = current_user(request)
# NOTE(review): the created trip is assigned but never used.
trips = Trip.objects.create_trip(request.POST, user)
return redirect(reverse('dashboard'))
# Render the destination page.
# NOTE(review): the `id` parameter (which shadows the builtin) is never
# used — the page shows no destination-specific data yet.
def destination(request, id):
context = {
'current_user' : current_user(request)
}
return render(request, 'travels_app/destination.html', context)
| [
"kylemarienthal@gmail.com"
] | kylemarienthal@gmail.com |
cd5f6d120bc92d0c0c9c0e040a3e4b7b80b53691 | 14f4d045750f7cf45252838d625b2a761d5dee38 | /argo/test/test_io_k8s_api_core_v1_service_account_token_projection.py | 05a59298e73201e88e387277ff653382026d9a8f | [] | no_license | nfillot/argo_client | cf8d7413d728edb4623de403e03d119fe3699ee9 | c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa | refs/heads/master | 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo
from models.io_k8s_api_core_v1_service_account_token_projection import IoK8sApiCoreV1ServiceAccountTokenProjection # noqa: E501
from argo.rest import ApiException
class TestIoK8sApiCoreV1ServiceAccountTokenProjection(unittest.TestCase):
"""IoK8sApiCoreV1ServiceAccountTokenProjection unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIoK8sApiCoreV1ServiceAccountTokenProjection(self):
"""Test IoK8sApiCoreV1ServiceAccountTokenProjection"""
# FIXME: construct object with mandatory attributes with example values
# model = argo.models.io_k8s_api_core_v1_service_account_token_projection.IoK8sApiCoreV1ServiceAccountTokenProjection() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"nfillot@weborama.com"
] | nfillot@weborama.com |
922489df49a0e7b815588906680161ed4b166cc8 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/perel/bs.py | f2dc670dffa12519d8b54937cfa2bb0693987963 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,003 | py | import time
##import sys
##sys.setrecursionlimit(10002)
from collections import deque
def flip(s):
return tuple(reversed([not i for i in s]))
def solve(s):
s=tuple([i=='+' for i in s])
return str(solve11(s))
def solve11(s):
for i in range(len(s)-1,-1,-1):
if not s[i]:
break
else:
return 0
s=s[:i+1]
step=0
for i in range(len(s)):
if not s[i]:
break
else:
return 1
if i:
step+=1
s=flip(s[:i])+s[i:]
return solve11(flip(s))+step+1
def main():
fi=file('bl.in')
fo=file('b.out','w')
time0=time.time()
t=int(fi.readline())
for ti in range(t):
time1=time.time()
s=fi.readline()[:-1]
ans="Case #%d: %s"%(ti+1,solve(s))
print ans,"%.3f"%(time.time()-time1)
fo.write(ans+'\n')
print "%.3f"%(time.time()-time0)
fi.close()
fo.close()
if __name__ == '__main__':
main()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
f67d673e20f5b4ea25b5e71532405d816a977b47 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2580/8318/319102.py | 3935e927bbc2f82b8da2de889d54f179ef80f578 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | m = int(input())
# Reads grid dimensions m x n and a number of "a,b" range operations, then
# prints the count of maximal cells: the running minimum a * minimum b
# (Range Addition II). `m` is read on the preceding (fused) line.
# NOTE(review): `list` shadows the builtin and is never used.
n = int(input())
ops = int(input())
list = [[]]
x = m
y = n
for i in range(ops):
a,b = input().split(",")
a1=int(a)
b1=int(b)
if x>a1 :
x=a1
if y>b1:
y=b1
print(x*y)
"1069583789@qq.com"
] | 1069583789@qq.com |
f8ef5490192913bf8811b092910983b29985832f | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc094/A/4025845.py | 9522ebc713f410e45b2cfec4a62d282a3b36443e | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys
# AtCoder ARC094 A: greedily raise the smallest of the values (by +2, or
# +1 to each of the two smallest) until all are equal; print the number
# of operations.
nums = list(map(int, input().split()))
nums.sort()
ans = 0
while nums[0] != nums[-1]:
if nums[0] < nums[1]:
nums[0] += 2
else:
nums[0] += 1
nums[1] += 1
ans += 1
nums.sort()
print(ans)
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
533149ed9a8326403f4b55c07064105b91edd0f3 | 11b503cd75f546465eb7d7e436f88587a0fa8596 | /calculating_area.py | 618867a889ea6bfff8a28d94fcf0c46859bf78f1 | [] | no_license | ibnahmadCoded/how_to_think_like_a_computer_scientist_Chapter_11 | d36074d71f5df92dfab864e827ba0968ee10d07e | e3fa08a5b64b62700a9590e2fff3c5c2d762a6f8 | refs/heads/master | 2022-04-17T01:30:57.788470 | 2020-04-15T20:08:11 | 2020-04-15T20:08:11 | 256,024,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | from study import *
class Rectangle:
    """Axis-aligned rectangle anchored at a corner point, with a width
    and a height; the corner must expose mutable x/y attributes."""

    def __init__(self, posn, w, h):
        """Create a rectangle at corner posn, with width w and height h."""
        self.corner = posn
        self.width = w
        self.height = h

    def __str__(self):
        return "({0}, {1}, {2})".format(self.corner, self.width,
                                        self.height)

    def grow(self, delta_width, delta_height):
        """Grow (or shrink) this rectangle by the given deltas."""
        self.width += delta_width
        self.height += delta_height

    def move(self, dx, dy):
        """Translate the corner point by (dx, dy)."""
        self.corner.x += dx
        self.corner.y += dy

    def area(self):
        """Return the rectangle's area (width * height)."""
        return self.width * self.height
| [
"alegeaa@yahoo.com"
] | alegeaa@yahoo.com |
5c330645e90f346492de9fffdb54c9a30ee92529 | e73f0bd1e15de5b8cb70f1d603ceedc18c42b39b | /Project Euler/018 - triangle path sum.py | e1bc49732e1df9a18b08e09522015fe6b956ba5e | [] | no_license | thran/the_code | cbfa3b8be86c3b31f76f6fbd1deb2013d3326a4a | ba73317ddc42e10791a829cc6e1a3460cc601c44 | refs/heads/master | 2023-01-05T14:39:16.708461 | 2022-12-25T08:37:39 | 2022-12-25T08:37:39 | 160,978,160 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | triangle = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
# Project Euler 18: maximum top-to-bottom path sum, computed row by row
# (Python 2: map() returns lists, print is a statement).
triangle = ([map(int, line.split()) for line in triangle.split("\n")])
# print triangle
# maxs rows are padded with 0 sentinels on both ends so position and
# position+1 lookups never run off the row.
maxs = [[0, triangle[0][0], 0]]
for line_number in range(1, len(triangle)):
m_line = [0]
for position in range(line_number+1):
m = max(maxs[line_number-1][position], maxs[line_number-1][position+1])
m_line.append(triangle[line_number][position] + m)
maxs.append(m_line+[0])
print max(maxs[-1])
| [
"thran@centrum.cz"
] | thran@centrum.cz |
dad79b226929c9b8fefe77d75b0102dbdc30556c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_197/ch37_2020_03_23_20_05_44_218037.py | 90428a35c7cf9ff017b68e78d1f501efd0391b52 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | senha="desisto"
Palavra=input("Qual é a senha?")
# NOTE(review): the loop body never re-prompts, so any wrong first guess
# prints forever — Palavra should be re-read inside the while body. The
# while/else success branch (on the following line) only runs when the
# condition is false, i.e. when the password matched.
while Palavra!=senha:
print (Palavra)
else:
print ("Você acertou a senha!") | [
"you@example.com"
] | you@example.com |
178b791256772b51daf76b2092c025e4e218ef8b | 09aff11fe5ccac7548d49db3a92803675117eaf3 | /BackEnd/Semana3/Dia4/02-condicionales.py | 92c0a9f066e137b627daaad228c770f8130984cb | [] | no_license | jorgegarba/CodiGo8 | 4defe235a790ebc248f9278f18ca050fde6c195f | f22b2b405ad999e5b960ce5f52936cd4e472af35 | refs/heads/master | 2023-01-09T02:14:27.134870 | 2020-03-18T03:15:52 | 2020-03-18T03:15:52 | 211,871,983 | 4 | 1 | null | 2023-01-07T22:11:22 | 2019-09-30T13:54:52 | JavaScript | UTF-8 | Python | false | false | 1,638 | py | # condiciona if else elif
a = 3
b = 4
# print(a > b)
# if (a > b):
# print("a es mayor que b")
# else:
# print("b es mayor que a")
# elif -> verifica esta otra condicion a ver si
# es correcta, tiene que ir antes de el else
num = 8
# if(num>0):
# print("Es un numero positivo")
# elif (num==0):
# print("Es cero")
# else:
# print("Es negativo")
# for -> es usado para iterar sobre una secuencia
# de elementos
cadena = "Buenos dias"
# for letra in cadena:
# print(letra,end="\n")
# for (let i=0 ; i<10; i++)
# for i in range(0,10,3):
# print(i)
# break -> para parar el bucle
# for numero in range(0,10):
# if(numero==5):
# break
# print(numero)
# continue -> para parar SOLO la iteraccion actual
# for numero in range(10):
# if(numero==6):
# continue
# print(numero)
# for doble
# for numero1 in range(4):
# for numero2 in range(3):
# print(numero1,numero2)
#
# while -> un bucle infinito de acciones mientras sea cierta la condicion
valor = 1
fin = 10
# while(valor<fin):
# print(valor)
# valor += 1 # valor ++
# pass -> no hace nada, solo indica que pase a la siguiente iteracion
# Exercise: given a list of numbers,
numeros = [1,2,3,4,5,6,7,8,9]
# store all the even ones in numeros_pares and the odd ones in numeros_impares,
# print both lists, and leave the original numeros list empty afterwards.
numeros_pares=[]
numeros_impares=[]
for numero in numeros:
if(numero%2==0):
numeros_pares.append(numero)
else:
numeros_impares.append(numero)
print(numeros_pares)
print(numeros_impares)
numeros.clear()
print(numeros)
| [
"ederiveroman@gmail.com"
] | ederiveroman@gmail.com |
e6e492a0ce1247655b69757dedda44645ef50458 | 0fba29dd8bb4e8e6a5d12b54d8f09d5b5c835576 | /23_personal/test.py | 143a4fd013f75e1d07534c24c9c78ee0f8d1ff23 | [] | no_license | buxuele/100-days-of-code | 4d37ab955d0cd463a1dd40ca298dac710c84237d | 58c326aca1425911a6979b1a9e305e0a3ed1b0d4 | refs/heads/master | 2020-04-25T23:10:22.816754 | 2019-05-11T01:00:21 | 2019-05-11T01:00:21 | 173,136,028 | 0 | 0 | null | 2019-04-11T20:42:42 | 2019-02-28T15:22:08 | HTML | UTF-8 | Python | false | false | 367 | py | #!/usr/bin/python3
# Time: 2019/04/24 10:19 PM
# \u7f8a\u7531\u5927\u4e95\592b\u5927\u4eba\u738b\u4e2d\u5de5
# (the line above is mojibake: escaped CJK codepoints, one of them
# malformed — presumably a garbled Chinese comment)
import codecs
# Octal-escaped UTF-8 Chinese text; .encode("utf-8") turns it into bytes.
gg = '\346\203\263\347\234\213\346\255\243\351\235\242\357\274\237\351\202\243\345\260\261\350\246\201\347\234\213\344\273\224\347\273\206\344\272\206\357\274\201'.encode("utf-8")
print(type(gg))
# a = codecs.decode(gg, "")
# print(a)
| [
"baogebuxuele@163.com"
] | baogebuxuele@163.com |
d80766c27a44af916ff6ef4330c81821db07f418 | b9fdfa1ad9315f54f198ab7d918f59e2c45b95ed | /additional_pipelines_used_in_LIFE/preprocessing_ica_aroma/build/keepalive/keepalive/__init__.py | 65dd57b5cad491357173661182777a54bb457052 | [] | no_license | fBeyer89/LIFE_rs_ICA_preprocessing | 4829e8282b06918aa4e610b26e9e0f0c5545b8b5 | 4868bf2d734d0dab89fc21f876aca400e1de1a5f | refs/heads/master | 2021-01-10T12:09:04.050976 | 2019-04-18T13:05:47 | 2019-04-18T13:05:47 | 44,090,982 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | # -*- coding: utf-8 -*-
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
# This file was part of urlgrabber, a high-level cross-protocol url-grabber
# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
# Copyright 2015 Sergio Fernández
from keepalive import *
__version__ = "0.5"
| [
"fbeyer@cbs.mpg.de"
] | fbeyer@cbs.mpg.de |
0e5e071104c431a893ca44a126aeda1409c77336 | a045055cb41f7d53e1b103c3655a17dc4cd18d40 | /python-master/kubernetes/test/test_v1_role_list.py | ce35aa97b1362b07e046028323570ffbd4a54b7c | [] | no_license | 18271693176/copy | 22f863b180e65c049e902de0327f1af491736e5a | ff2511441a2df03817627ba8abc6b0e213878023 | refs/heads/master | 2020-04-01T20:20:28.048995 | 2018-11-05T02:21:53 | 2018-11-05T02:21:53 | 153,599,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_role_list import V1RoleList
class TestV1RoleList(unittest.TestCase):
""" V1RoleList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1RoleList(self):
"""
Test V1RoleList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_role_list.V1RoleList()
pass
if __name__ == '__main__':
unittest.main()
| [
"906317366@qq.com"
] | 906317366@qq.com |
4bf1b976b01d1825146b5147af5e3ab534fd4d6c | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/nlp/data/token_classification/token_classification_utils.py | 94acd69d3b11374984fa4e13ef269755039d8c70 | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 7,468 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import re
import string
from typing import Dict
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
fill_class_weights,
get_freq_weights,
get_label_stats,
)
from nemo.utils import logging
__all__ = ['get_label_ids', 'create_text_and_labels']
def remove_punctuation(word: str):
"""
Removes all punctuation marks from a word except for '
that is often a part of word: don't, it's, and so on
"""
all_punct_marks = string.punctuation.replace("'", '')
return re.sub('[' + all_punct_marks + ']', '', word)
def create_text_and_labels(output_dir: str, file_path: str, punct_marks: str = ',.?'):
"""
Create datasets for training and evaluation.
Args:
output_dir: path to the output data directory
file_path: path to file name
punct_marks: supported punctuation marks
The data will be split into 2 files: text.txt and labels.txt. \
Each line of the text.txt file contains text sequences, where words\
are separated with spaces. The labels.txt file contains \
corresponding labels for each word in text.txt, the labels are \
separated with spaces. Each line of the files should follow the \
format: \
[WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
"""
if not os.path.exists(file_path):
raise ValueError(f'{file_path} not found')
os.makedirs(output_dir, exist_ok=True)
base_name = os.path.basename(file_path)
labels_file = os.path.join(output_dir, 'labels_' + base_name)
text_file = os.path.join(output_dir, 'text_' + base_name)
with open(file_path, 'r') as f:
with open(text_file, 'w') as text_f:
with open(labels_file, 'w') as labels_f:
for line in f:
line = line.split()
text = ''
labels = ''
for word in line:
label = word[-1] if word[-1] in punct_marks else 'O'
word = remove_punctuation(word)
if len(word) > 0:
if word[0].isupper():
label += 'U'
else:
label += 'O'
word = word.lower()
text += word + ' '
labels += label + ' '
text_f.write(text.strip() + '\n')
labels_f.write(labels.strip() + '\n')
print(f'{text_file} and {labels_file} created from {file_path}.')
def get_label_ids(
label_file: str,
is_training: bool = False,
pad_label: str = 'O',
label_ids_dict: Dict[str, int] = None,
get_weights: bool = True,
class_labels_file_artifact='label_ids.csv',
):
"""
Generates str to int labels mapping for training data or checks correctness of the label_ids_dict
file for non-training files or if label_ids_dict is specified
Args:
label_file: the path of the label file to process
is_training: indicates whether the label_file is used for training
pad_label: token used for padding
label_ids_dict: str label name to int ids mapping. Required for non-training data.
If specified, the check that all labels from label_file are present in label_ids_dict will be performed.
For training data, if label_ids_dict is None, a new mapping will be generated from label_file.
get_weights: set to True to calculate class weights, required for Weighted Loss.
class_labels_file_artifact: name of the file to save in .nemo
"""
if not os.path.exists(label_file):
raise ValueError(f'File {label_file} was not found.')
logging.info(f'Processing {label_file}')
if not is_training and label_ids_dict is None:
raise ValueError(
f'For non training data, label_ids_dict created during preprocessing of the training data '
f'should be provided'
)
# collect all labels from the label_file
data_dir = os.path.dirname(label_file)
unique_labels = set(pad_label)
all_labels = []
with open(label_file, 'r') as f:
for line in f:
line = line.strip().split()
all_labels.extend(line)
unique_labels.update(line)
# check that all labels from label_file are present in the specified label_ids_dict
# or generate label_ids_dict from data (for training only)
if label_ids_dict:
logging.info(f'Using provided labels mapping {label_ids_dict}')
for name in unique_labels:
if name not in label_ids_dict:
raise ValueError(f'{name} class from {label_file} not found in the provided mapping: {label_ids_dict}')
else:
label_ids_dict = {pad_label: 0}
if pad_label in unique_labels:
unique_labels.remove(pad_label)
for label in sorted(unique_labels):
label_ids_dict[label] = len(label_ids_dict)
label_ids_filename = os.path.join(data_dir, class_labels_file_artifact)
if is_training:
with open(label_ids_filename, 'w') as f:
labels, _ = zip(*sorted(label_ids_dict.items(), key=lambda x: x[1]))
f.write('\n'.join(labels))
logging.info(f'Labels mapping {label_ids_dict} saved to : {label_ids_filename}')
# calculate label statistics
base_name = os.path.splitext(os.path.basename(label_file))[0]
stats_file = os.path.join(data_dir, f'{base_name}_label_stats.tsv')
if os.path.exists(stats_file) and not is_training and not get_weights:
logging.info(f'{stats_file} found, skipping stats calculation.')
else:
all_labels = [label_ids_dict[label] for label in all_labels]
logging.info(f'Three most popular labels in {label_file}:')
total_labels, label_frequencies, max_id = get_label_stats(all_labels, stats_file)
logging.info(f'Total labels: {total_labels}. Label frequencies - {label_frequencies}')
if get_weights:
class_weights_pkl = os.path.join(data_dir, f'{base_name}_weights.p')
if os.path.exists(class_weights_pkl):
class_weights = pickle.load(open(class_weights_pkl, 'rb'))
logging.info(f'Class weights restored from {class_weights_pkl}')
else:
class_weights_dict = get_freq_weights(label_frequencies)
logging.info(f'Class Weights: {class_weights_dict}')
class_weights = fill_class_weights(class_weights_dict, max_id)
pickle.dump(class_weights, open(class_weights_pkl, "wb"))
logging.info(f'Class weights saved to {class_weights_pkl}')
else:
class_weights = None
return label_ids_dict, label_ids_filename, class_weights
| [
"noreply@github.com"
] | NVIDIA.noreply@github.com |
ce764895f86745e70087150b9abf4c75d5737670 | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/6194556/snippet.py | b70124de919065b567273fb101936d8671b21e0e | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,312 | py | '''
SimpleHTTPServerSSL.py - simple HTTP server supporting SSL/TLS. I.e. HTTPS. For python 3.3
- replace CERT and KEY with the location of your .pem server file.
- the default port is 443.
usage: python SimpleHTTPServerSSL.py
based on http://code.activestate.com/recipes/442473-simple-http-server-supporting-ssl-secure-communica/
'''
import socket, os
from socketserver import BaseServer
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import ssl
CERT = 'ssl.crt.pem'
KEY = 'ssl.key.pem'
class SecureHTTPServer(HTTPServer):
def __init__(self, server_address, HandlerClass):
BaseServer.__init__(self, server_address, HandlerClass)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.load_cert_chain(certfile=CERT, keyfile=KEY)
self.socket = ctx.wrap_socket(socket.socket(self.address_family, self.socket_type), server_side=True)
self.server_bind()
self.server_activate()
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = SecureHTTPServer):
server_address = ('', 443) # (address, port)
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTPS on", sa[0], "port", sa[1], "...")
httpd.serve_forever()
if __name__ == '__main__':
test() | [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
0e2625b5eeb8fad171f69a84170920695873a752 | 3c8701e04900389adb40a46daedb5205d479016c | /oldboy-python18/day18-model/day18/cms/cms/app01/models.py | 10494d9551d007fb7aaff17983302a15f712c02e | [] | no_license | huboa/xuexi | 681300653b834eaf506f49987dcca83df48e8db7 | 91287721f188b5e24fbb4ccd63b60a80ed7b9426 | refs/heads/master | 2020-07-29T16:39:12.770272 | 2018-09-02T05:39:45 | 2018-09-02T05:39:45 | 73,660,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from django.db import models
# Create your models here.
class Book(models.Model):
id=models.AutoField(primary_key=True)
title=models.CharField(max_length=32)
pubDate=models.DateField()
price=models.DecimalField(max_digits=6,decimal_places=2)
publish=models.CharField(max_length=32)
| [
"wxcr11@gmail.com"
] | wxcr11@gmail.com |
ed6332db56631c9ab5f726d3b84ed62bdbee475a | b12adda0b77dba851f1a09b92c4553da6333ffaf | /tools/ttn/ttn_constants.py | a98a422d1e567b66e82d1b1ed6ab5664371204a1 | [] | no_license | Vignesh2208/Titan | c9a86e236e150b1de80d5054b48b9bc482d2785b | 3366200b47c6f81fc8cafb449307325f7cf45da0 | refs/heads/master | 2021-06-22T01:58:10.836590 | 2021-04-14T22:31:33 | 2021-04-14T22:31:33 | 213,460,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | """Some constants used by ttn application."""
import os
TTN_FOLDER_NAME = '.ttn'
TTN_CONFIG_DIR = f'{os.path.expanduser("~")}/{TTN_FOLDER_NAME}'
PROJECT_NAME_KEY = 'PROJECT_NAME'
PROJECT_SRC_DIR_KEY = 'PROJECT_SRC_DIR'
PROJECT_CLANG_INIT_PARAMS_KEY = 'PROJECT_CLANG_INIT_PARAMS'
PROJECT_CLANG_LOCK_KEY = 'PROJECT_CLANG_LOCK'
PROJECT_ARCH_NAME = 'PROJECT_ARCH_NAME'
PROJECT_ARCH_TIMINGS_PATH_KEY = 'PROJECT_ARCH_TIMINGS_PATH'
BBL_COUNTER_KEY = 'BBL_Counter'
LOOP_COUNTER_KEY = 'Loop_Counter'
NIC_SPEED_MBPS_KEY = 'NIC_SPEED_MBPS'
CPU_CYCLE_NS_KEY = 'CPU_CYCLES_NS'
ROB_SIZE_KEY = 'ROB_SIZE'
DISPATCH_UNITS_KEY = 'DISPATCH_UNITS'
TIMING_MODEL_KEY = 'TIMING_MODEL'
# TODO: For now we only support one level of cache modelling i.e L1 cache
L1_INS_CACHE_SIZE_KEY = 'L1_INS_CACHE_SIZE_KB'
L1_INS_CACHE_LINES_SIZE_KEY = 'L1_INS_CACHE_LINE_SIZE'
L1_INS_CACHE_REPLACEMENT_POLICY_KEY = 'L1_INS_CACHE_REPLACEMENT_POLICY'
L1_INS_CACHE_MISS_CYCLES_KEY = 'L1_INS_CACHE_MISS_CYCLES'
L1_INS_CACHE_ASSOCIATIVITY_KEY = 'L1_INS_CACHE_ASSOCIATIVITY'
L1_DATA_CACHE_SIZE_KEY = 'L1_DATA_CACHE_SIZE_KB'
L1_DATA_CACHE_LINES_SIZE_KEY = 'L1_DATA_CACHE_LINE_SIZE'
L1_DATA_CACHE_REPLACEMENT_POLICY_KEY = 'L1_DATA_CACHE_REPLACEMENT_POLICY'
L1_DATA_CACHE_MISS_CYCLES_KEY = 'L1_DATA_CACHE_MISS_CYCLES'
L1_DATA_CACHE_ASSOCIATIVITY_KEY = 'L1_DATA_CACHE_ASSOCIATIVITY'
DEFAULT_PROJECT_NAME = 'DEFAULT'
DEFAULT_PROJECT_ARCH = 'NONE'
DEFAULT_PROJECT_SRC_DIR = '/tmp'
DEFAULT_NIC_SPEED_MBPS = 1000
# TODO: For now we only support one level of cache modelling i.e L1 cache
# Associativity = 1 <=> Direct-Mapped cache
DEFAULT_L1_INS_CACHE_ASSOCIATIVITY = 8
DEFAULT_L1_INS_CACHE_SIZE_KB = 32
DEFAULT_L1_INS_CACHE_LINE_SIZE_BYTES = 64
DEFAULT_L1_INS_CACHE_REPLACEMENT_POLICY = 'LRU'
DEFAULT_L1_INS_CACHE_MISS_CYCLES = 100
# Associativity = 1 <=> Direct-Mapped cache
DEFAULT_L1_DATA_CACHE_ASSOCIATIVITY = 8
DEFAULT_L1_DATA_CACHE_SIZE_KB = 32
DEFAULT_L1_DATA_CACHE_LINE_SIZE_BYTES = 64
DEFAULT_L1_DATA_CACHE_REPLACEMENT_POLICY = 'LRU'
DEFAULT_L1_DATA_CACHE_MISS_CYCLES = 100
DEFAULT_ROB_SIZE = 1024
DEFAULT_NUM_DISPATCH_UNITS = 8
DEFAULT_TIMING_MODEL = 'EMPIRICAL'
NO_ARCH = 'ARCH_NONE'
| [
"vig2208@gmail.com"
] | vig2208@gmail.com |
d06972574b5df6446365a0de414d5d4a6a78d0d2 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/securitydevops/azure-mgmt-securitydevops/generated_samples/azure_dev_ops_org_get.py | 40d2f4d019e8bc017523654781724c90fb9e4c69 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,649 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.securitydevops import MicrosoftSecurityDevOps
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-securitydevops
# USAGE
python azure_dev_ops_org_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = MicrosoftSecurityDevOps(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.azure_dev_ops_org.get(
resource_group_name="westusrg",
azure_dev_ops_connector_name="testconnector",
azure_dev_ops_org_name="myOrg",
)
print(response)
# x-ms-original-file: specification/securitydevops/resource-manager/Microsoft.SecurityDevOps/preview/2022-09-01-preview/examples/AzureDevOpsOrgGet.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
dbd811dd5df7010727684b4f4b84e20d4e19aec4 | 18430833920b3193d2f26ed526ca8f6d7e3df4c8 | /src/audit_trail/factories.py | 1c5f929866090df1f20621c64c05d9739653dd4a | [
"MIT"
] | permissive | providenz/phase | ed8b48ea51d4b359f8012e603b328adf13d5e535 | b0c46a5468eda6d4eae7b2b959c6210c8d1bbc60 | refs/heads/master | 2021-01-17T06:56:07.842719 | 2016-06-28T11:17:53 | 2016-06-28T11:17:53 | 47,676,508 | 0 | 0 | null | 2015-12-09T07:45:19 | 2015-12-09T07:45:18 | null | UTF-8 | Python | false | false | 379 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import factory
from factory import fuzzy
from accounts.factories import UserFactory
from .models import Activity
class ActivityFactory(factory.DjangoModelFactory):
class Meta:
model = Activity
actor = factory.SubFactory(UserFactory)
verb = fuzzy.FuzzyChoice(zip(*Activity.VERB_CHOICES)[0])
| [
"lp@providenz.fr"
] | lp@providenz.fr |
2d7a63594e9b6b12e395a0d8dcef370df874978a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03739/s362994379.py | 1519886d81b2b0628ca8e5ba81794a786daf66d9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | n = int(input())
s = list(map(int, input().split()))
def cost(t):
res = 0
sum = 0
for y in s:
sum += y
if sum * t <= 0:
res += abs(sum - t)
sum = t
t *= -1
return res
print(min(cost(1), cost(-1)))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
aca2d0c08f81edea01fb482800cfba161df2fc57 | 1a42c5a6e806deabb721c77619c9bc45bd01c78f | /lifegame.py | e271e25afc804ab32f5046372138895359d72099 | [] | no_license | nsakki55/smartinfo | 74f7ee2337518baef76812468b3f50785bcf4541 | 2777262a2c933d39bc35d1f9b674c8e374da63b7 | refs/heads/master | 2020-06-02T12:07:57.434869 | 2019-06-10T10:50:30 | 2019-06-10T10:50:30 | 191,149,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,751 | py | #!/usr/bin/env python
#coding:utf-8
import pygame
from pygame.locals import *
import random
import sys
import datetime
SCR_RECT = Rect(0, 0, 1440, 1080) # スクリーンサイズ
CS = 10 # セルのサイズ
NUM_ROW = int(SCR_RECT.height / CS) # フィールドの行数
NUM_COL = int(SCR_RECT.width / CS) # フィールドの列数
DEAD, ALIVE = 0, 1 # セルの生死定数
RAND_LIFE = 0.1
class LifeGame:
def __init__(self):
dt_now=datetime.datetime.now()
print(dt_now.microsecond)
pygame.init()
screen = pygame.display.set_mode(SCR_RECT.size)
pygame.display.set_caption(u"Life Game")
self.font = pygame.font.SysFont(None, 16)
# NUM_ROW x NUM_COLサイズのフィールド(2次元リスト)
self.field = [[DEAD for x in range(NUM_COL)] for y in range(NUM_ROW)]
self.generation = 0 # 世代数
self.run = False # シミュレーション実行中か?
self.cursor = [NUM_COL/2, NUM_ROW/2] # カーソルの位置
# ライフゲームを初期化
self.clear()
# メインループ
clock = pygame.time.Clock()
while True:
clock.tick(60)
# dt_now=datetime.datetime.now()
# print(dt_now.microsecond)
self.update()
self.draw(screen)
pygame.display.update()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
# 矢印キーでカーソルを移動
elif event.key == K_LEFT:
self.cursor[0] -= 1
if self.cursor[0] < 0: self.cursor[0] = 0
elif event.key == K_RIGHT:
self.cursor[0] += 1
if self.cursor[0] > NUM_COL-1: self.cursor[0] = NUM_COL-1
elif event.key == K_UP:
self.cursor[1] -= 1
if self.cursor[1] < 0: self.cursor[1] = 0
elif event.key == K_DOWN:
self.cursor[1] += 1
if self.cursor[1] > NUM_ROW-1: self.cursor[1] = NUM_ROW-1
# スペースキーでカーソルのセルを反転
elif event.key == K_SPACE:
x, y = self.cursor
x=int(x)
y=int(y)
if self.field[y][x] == DEAD:
self.field[y][x] = ALIVE
elif self.field[y][x] == ALIVE:
self.field[y][x] = DEAD
# sキーでシミュレーション開始
elif event.key == K_s:
self.run = not self.run
# nキーで1世代だけ進める
elif event.key == K_n:
self.step()
# cキーでクリア
elif event.key == K_c:
self.clear()
# rキーでランダムに生きているセルを追加
elif event.key == K_r:
self.rand()
elif event.type == MOUSEBUTTONDOWN and event.button == 1:
# 左ボタンクリックでセルを反転
px, py = event.pos
x, y = px/CS, py/CS
x=int(x)
y=int(y)
self.cursor = [x, y]
if self.field[y][x] == DEAD:
self.field[y][x] = ALIVE
elif self.field[y][x] == ALIVE:
self.field[y][x] = DEAD
def clear(self):
"""ゲームを初期化"""
self.generation = 0
for y in range(NUM_ROW):
for x in range(NUM_COL):
self.field[y][x] = DEAD
for i in range(NUM_ROW):
self.field[i][int(NUM_COL/2)]=ALIVE
for i in range(NUM_ROW):
self.field[i][int(NUM_COL/2)-1]=ALIVE
for i in range(NUM_COL):
self.field[int(NUM_ROW/2)][i]=ALIVE
for i in range(NUM_COL):
self.field[int(NUM_ROW/2)-1][i]=ALIVE
def rand(self):
"""ランダムに生きているセルを追加"""
for y in range(NUM_ROW):
for x in range(NUM_COL):
if random.random() < RAND_LIFE:
self.field[y][x] = ALIVE
def update(self):
"""フィールドを更新"""
if self.run:
self.step() # 1世代進める
def step(self):
"""1世代だけ進める"""
# 次のフィールド
next_field = [[False for x in range(NUM_COL)] for y in range(NUM_ROW)]
# ライフゲームの規則にしたがって次のフィールドをセット
for y in range(NUM_ROW):
for x in range(NUM_COL):
num_alive_cells = self.around(x, y)
if num_alive_cells == 2:
# 周囲の2セルが生きていれば維持
next_field[y][x] = self.field[y][x]
elif num_alive_cells == 3:
# 周囲の3セルが生きていれば誕生
next_field[y][x] = ALIVE
else:
# それ以外では死亡
next_field[y][x] = DEAD
self.field = next_field
self.generation += 1
def draw(self, screen):
"""フィールドを描画"""
# セルを描画
for y in range(NUM_ROW):
for x in range(NUM_COL):
if self.field[y][x] == ALIVE:
pygame.draw.rect(screen, (0,255,0), Rect(x*CS,y*CS,CS,CS))
elif self.field[y][x] == DEAD:
pygame.draw.rect(screen, (0,0,0), Rect(x*CS,y*CS,CS,CS))
pygame.draw.rect(screen, (50,50,50), Rect(x*CS,y*CS,CS,CS), 1) # グリッド
# 中心線を描く
pygame.draw.line(screen, (255,0,0), (0,SCR_RECT.height/2), (SCR_RECT.width,SCR_RECT.height/2))
pygame.draw.line(screen, (255,0,0), (SCR_RECT.width/2,0), (SCR_RECT.width/2,SCR_RECT.height))
# カーソルを描画
pygame.draw.rect(screen, (0,0,255), Rect(self.cursor[0]*CS,self.cursor[1]*CS,CS,CS), 1)
# ゲーム情報を描画
screen.blit(self.font.render("generation:%d" % self.generation, True, (0,255,0)), (0,0))
# screen.blit(self.font.render("space : birth/kill", True, (0,255,0)), (0,12))
# screen.blit(self.font.render("s : start/stop", True, (0,255,0)), (0,24))
# screen.blit(self.font.render("n : next", True, (0,255,0)), (0,36))
# screen.blit(self.font.render("r : random", True, (0,255,0)), (0,48))
def around(self, x, y):
"""(x,y)の周囲8マスの生きているセルの数を返す"""
if x == 0 or x == NUM_COL-1 or y == 0 or y == NUM_ROW-1:
return 0
sum = 0
sum += self.field[y-1][x-1] # 左上
sum += self.field[y-1][x] # 上
sum += self.field[y-1][x+1] # 右上
sum += self.field[y][x-1] # 左
sum += self.field[y][x+1] # 右
sum += self.field[y+1][x-1] # 左下
sum += self.field[y+1][x] # 下
sum += self.field[y+1][x+1] # 右下
return sum
if __name__ == "__main__":
LifeGame()
| [
"n.sakki55@gmail.com"
] | n.sakki55@gmail.com |
804f8e56bedc2fc98d221bf2b9d493cdf526df9b | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /tweens/__init__.py | 084009a757dab8cd2babe574c3a960dc078fa601 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 1,512 | py | import unrealsdk # type: ignore
from Mods.ModMenu import Game, ModTypes, SDKMod
from .easing import (
back_in,
back_in_out,
back_out,
bounce_in,
bounce_in_out,
bounce_out,
circ_in,
circ_in_out,
circ_out,
cubic_in,
cubic_in_out,
cubic_out,
ease,
elastic_in,
elastic_in_out,
elastic_out,
expo_in,
expo_in_out,
expo_out,
linear,
quad_in,
quad_in_out,
quad_out,
quart_in,
quart_in_out,
quart_out,
quint_in,
quint_in_out,
quint_out,
sine_in,
sine_in_out,
sine_out,
)
from .tween import Tween
__all__ = [
"Tween",
"ease",
"linear",
"quad_in",
"quad_out",
"quad_in_out",
"cubic_in",
"cubic_out",
"cubic_in_out",
"quart_in",
"quart_out",
"quart_in_out",
"quint_in",
"quint_out",
"quint_in_out",
"sine_in",
"sine_out",
"sine_in_out",
"expo_in",
"expo_out",
"expo_in_out",
"circ_in",
"circ_out",
"circ_in_out",
"back_in",
"back_out",
"back_in_out",
"elastic_in",
"elastic_out",
"elastic_in_out",
"bounce_in",
"bounce_out",
"bounce_in_out",
]
class Tweens(SDKMod):
Name = "Tweens"
Version = "1.1"
Types = ModTypes.Library
Description = "A tweening library with various easing functions."
Author = "juso"
Status = "Enabled"
SettingsInputs = {}
SupportedGames = Game.BL2 | Game.TPS | Game.TPS
unrealsdk.RegisterMod(Tweens())
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
c18ae8bfe41026f2887bd16ea6477d010ae39f20 | e50ba4cc303d4165bef9e2917103c084cfbe0e07 | /virtual/bin/easy_install | 6b1f07b5d2a57699078d7cec02fd7d693960f2b8 | [
"MIT"
] | permissive | Antony-me/Ratemyapp | 09049fce54d3a3ed2b256970e7840d20942e8c84 | e547fea82439a3e4f83aa78bf16f93b1ea9ab00b | refs/heads/main | 2023-01-28T16:52:58.635646 | 2020-12-01T16:49:07 | 2020-12-01T16:49:07 | 316,425,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | #!/home/moringa/Documents/Moringa-Projects/CORE-PYTHON/Django/Ratemyapp/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"antonymunyasia993@gmail.com"
] | antonymunyasia993@gmail.com | |
eff42d766d3d17fe406f91087dcce5791135b309 | 2bf43e862b432d44ba545beea4e67e3e086c1a1c | /nemo_text_processing/inverse_text_normalization/de/verbalizers/decimal.py | ff3839533d7252c14f76c14c774dd3d78e9027a7 | [
"Apache-2.0"
] | permissive | ericharper/NeMo | 719e933f6ffce1b27358bc21efe87cdf144db875 | f1825bc4b724b78c2d6ca392b616e8dc9a8cde04 | refs/heads/master | 2022-10-06T01:45:21.887856 | 2022-09-14T19:09:42 | 2022-09-14T19:09:42 | 259,380,135 | 1 | 0 | Apache-2.0 | 2022-09-20T18:01:57 | 2020-04-27T15:54:20 | Python | UTF-8 | Python | false | false | 1,977 | py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_preserve_order
from pynini.lib import pynutil
class DecimalFst(GraphFst):
"""
Finite state transducer for verbalizing decimal, e.g.
decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" } -> -12.5006 billion
Args:
tn_decimal_verbalizer: TN decimal verbalizer
"""
def __init__(self, tn_decimal_verbalizer: GraphFst, deterministic: bool = True):
super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)
delete_space = pynutil.delete(" ")
optional_sign = pynini.closure(
pynutil.delete("negative: \"") + NEMO_NOT_QUOTE + pynutil.delete("\"") + delete_space, 0, 1
)
optional_integer = pynini.closure(tn_decimal_verbalizer.integer, 0, 1)
optional_fractional = pynini.closure(
delete_space + pynutil.insert(",") + tn_decimal_verbalizer.fractional_default, 0, 1
)
graph = (optional_integer + optional_fractional + tn_decimal_verbalizer.optional_quantity).optimize()
self.numbers = optional_sign + graph
graph = self.numbers + delete_preserve_order
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| [
"noreply@github.com"
] | ericharper.noreply@github.com |
11ba466d6dd826bfe33dce530a4def238f1d18da | e3ddee78a8f5fdc0260d6a5d8f3cbf459dd1aaa8 | /server/apps/places/serializers/ingredients.py | bb43262b935316b8fdb24505cd1d464f33fea434 | [] | no_license | AlAstroMoody/summer_practice | 91b6110f95436f2b91334a4d1626bf2f0a505a50 | 79629bcdcf230a395a53fad0b52e75ebd7385538 | refs/heads/master | 2023-01-15T19:32:22.814893 | 2020-11-23T10:17:46 | 2020-11-23T10:17:46 | 289,492,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from rest_framework.serializers import ModelSerializer
from apps.places.models import Ingredient
class IngredientSerializer(ModelSerializer):
class Meta:
model = Ingredient
fields = ('id', 'name', 'calories')
| [
"aastrotenko@mail.ru"
] | aastrotenko@mail.ru |
176e6e7d4985abd3fc24bc06efd3afa99b86fb8c | 318a2283e9fd8386e1e9b8b33393ec21892ff053 | /tests/formatters/setupapi.py | c64b826daffd96f218ace0517a038c9f956f7480 | [
"Apache-2.0"
] | permissive | ddm1004/plaso | 3a4590f9c7fb5d624938dd1caea703dc92118646 | 88d44561754c5f981d4ab96d53186d1fc5f97f98 | refs/heads/master | 2021-05-19T10:16:24.111136 | 2020-02-27T04:40:48 | 2020-02-27T04:40:48 | 251,647,179 | 0 | 0 | Apache-2.0 | 2020-03-31T15:31:29 | 2020-03-31T15:31:29 | null | UTF-8 | Python | false | false | 918 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows Setupapi log event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import setupapi
from tests.formatters import test_lib
class SetupapiLogFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Windows Setupapi log event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = setupapi.SetupapiLogFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = setupapi.SetupapiLogFormatter()
expected_attribute_names = [
'entry_type',
'exit_status']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
if __name__ == '__main__':
unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
d0df6dfa88db30e7f5491a7bae3c0a50bff9e42b | dd223d7f6c015c3484e795934bcce62be07e48e4 | /xkyy/apps/starry/migrations/0006_auto_20190524_2005.py | 1f06dadcbb386c125e90a590b9b97e6fa00801c3 | [] | no_license | hfxjd9527/xkyy | 84e696ba8c716dc7c0fb25bf71bb82f21ba314a6 | 61df8774dc63ec1b70cc6daad52da5aa51569076 | refs/heads/master | 2022-12-17T02:35:33.597919 | 2019-06-17T00:11:50 | 2019-06-17T00:11:50 | 190,134,676 | 0 | 0 | null | 2022-12-08T01:46:24 | 2019-06-04T05:22:34 | CSS | UTF-8 | Python | false | false | 985 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-05-24 20:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('starry', '0005_auto_20190524_1915'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': '文章', 'verbose_name_plural': '文章'},
),
migrations.AlterModelOptions(
name='category',
options={'verbose_name': '分类', 'verbose_name_plural': '分类'},
),
migrations.AddField(
model_name='article',
name='is_bigcategory',
field=models.BooleanField(default=True, verbose_name='是否大分类'),
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=20, verbose_name='分类'),
),
]
| [
"1725824530@qq.com"
] | 1725824530@qq.com |
c3fb12ce35d80f56662ff090d5961a5ed02cef2e | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/gstoption.py | f9d47d30fe88753af428273e2f8939186162e8b2 | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # encoding: utf-8
# module gstoption
# from /usr/lib64/python2.6/site-packages/gstoption.so
# by generator 1.136
# no doc
# no imports
# functions
def get_group(*args, **kwargs): # real signature unknown
    """Auto-generated stub for ``gstoption.get_group``.

    The real implementation lives in the compiled extension
    (gstoption.so); this placeholder only documents the name for IDEs.
    """
    pass
# no classes
| [
"pkalita@princeton.edu"
] | pkalita@princeton.edu |
e94835f09834dfba2a9778514efae815cf3f3c5f | a7d2135ca94722932a0a0edbaf1935055f5fe901 | /unsorted/governance-at-scale-account-factory/account-creation-shared/v4/src/handler.py | b297a00b7606e156a89ac6ee0088d6d0ef0068ed | [
"MIT-0"
] | permissive | awslabs/aws-service-catalog-products | b0a1c9d125758a87dd3913a00dfe029dffbb97ac | 69b295f887582b880f9af50318765f7540f34852 | refs/heads/main | 2023-08-18T06:22:11.366203 | 2022-05-12T20:34:14 | 2022-05-12T20:34:14 | 187,069,705 | 166 | 40 | MIT-0 | 2022-03-07T15:26:26 | 2019-05-16T17:07:08 | Python | UTF-8 | Python | false | false | 4,750 | py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json, logging, time
from urllib.request import Request, urlopen
from betterboto import client as betterboto_client
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
    """CloudFormation custom-resource handler that provisions an AWS account.

    On Create/Update it assumes a role in the organization's root account,
    checks whether an account with the requested name already exists, and
    otherwise creates one and polls until the asynchronous creation leaves
    IN_PROGRESS.  Every path reports back to CloudFormation via
    send_response() so the stack never hangs waiting on this resource.

    Args:
        event: CloudFormation custom-resource event (RequestType,
            ResourceProperties with AccountName/Email/IamUserAccessToBilling,
            ResponseURL, ...).
        context: Lambda context (used for the log stream name).
    """
    request_type = event["RequestType"]
    try:
        logger.info(request_type)
        if request_type in ["Create", "Update"]:
            assumable_role_in_root_account_arn = os.environ.get(
                "ASSUMABLE_ROLE_IN_ROOT_ACCOUNT_ARN"
            )
            organization_account_access_role = os.environ.get(
                "ORGANIZATION_ACCOUNT_ACCESS_ROLE"
            )
            resource_properties = event.get("ResourceProperties")
            account_name = resource_properties.get("AccountName")
            email = resource_properties.get("Email")
            iam_user_access_to_billing = resource_properties.get(
                "IamUserAccessToBilling"
            )
            with betterboto_client.CrossAccountClientContextManager(
                "organizations",
                assumable_role_in_root_account_arn,
                "assumable_org_role",
            ) as organizations:
                logger.info("Checking if need to create")
                response = organizations.list_accounts_single_page()
                for account in response.get("Accounts", []):
                    if account.get("Name") == account_name:
                        account_id = account.get("Id")
                        logger.info("Already created")
                        send_response(
                            event,
                            context,
                            "SUCCESS"
                            if account.get("Status") == "ACTIVE"
                            else "FAILED",
                            {
                                "Message": "Account was already created",
                                "account_id": account_id,
                            },
                        )
                        # BUG FIX: the original fell through after reporting
                        # "already created" and went on to request a second,
                        # duplicate account creation; stop here instead.
                        return

                logger.info("Creating account")
                response = organizations.create_account(
                    Email=email,
                    AccountName=account_name,
                    RoleName=organization_account_access_role,
                    IamUserAccessToBilling=iam_user_access_to_billing,
                )
                create_request_id = response.get("CreateAccountStatus").get("Id")
                logger.info("Waiting")
                # CreateAccount is asynchronous: poll every 5s until the
                # request leaves the IN_PROGRESS state.
                while response.get("CreateAccountStatus").get("State") == "IN_PROGRESS":
                    logger.info(
                        "Still waiting: {}".format(
                            response.get("CreateAccountStatus").get("State")
                        )
                    )
                    time.sleep(5)
                    response = organizations.describe_create_account_status(
                        CreateAccountRequestId=create_request_id
                    )
                state = response.get("CreateAccountStatus").get("State")
                account_id = response.get("CreateAccountStatus").get("AccountId")
                logger.info(f"Finished: {state}")
                send_response(
                    event,
                    context,
                    "SUCCESS" if state == "SUCCEEDED" else "FAILED",
                    {
                        "Message": "Account was created"
                        if state == "SUCCEEDED"
                        else f"Failed: {response.get('CreateAccountStatus').get('FailureReason')}",
                        "account_id": account_id,
                    },
                )
        # NOTE: the original also had `elif request_type == "Update"` here,
        # but it was unreachable ("Update" is consumed by the first branch).
        elif request_type == "Delete":
            # Accounts cannot be deleted this way; acknowledge so the stack
            # delete can proceed.
            send_response(event, context, "SUCCESS", {"Message": "Deleted"})
        else:
            send_response(event, context, "FAILED", {"Message": "Unexpected"})
    except Exception as ex:
        # Always answer CloudFormation, even on unexpected errors, so the
        # stack operation fails fast instead of timing out.
        logger.exception(ex)
        send_response(event, context, "FAILED", {"Message": "Exception"})
def send_response(e, c, rs, rd):
    """PUT the custom-resource result back to CloudFormation's pre-signed URL.

    Args:
        e: the original CloudFormation event (supplies ResponseURL/StackId/...).
        c: the Lambda context (supplies the log stream name).
        rs: response status, "SUCCESS" or "FAILED".
        rd: response data dict forwarded to the stack.
    """
    payload = {
        "Status": rs,
        "Reason": "CloudWatch Log Stream: " + c.log_stream_name,
        "PhysicalResourceId": c.log_stream_name,
        "StackId": e["StackId"],
        "RequestId": e["RequestId"],
        "LogicalResourceId": e["LogicalResourceId"],
        "Data": rd,
    }
    body = json.dumps(payload).encode()
    headers = {"content-type": "", "content-length": str(len(body))}
    request = Request(e["ResponseURL"], data=body, method="PUT", headers=headers)
    reply = urlopen(request)
    logger.info("Status message: {} {}".format(reply.msg, reply.getcode()))
| [
"noreply@github.com"
] | awslabs.noreply@github.com |
2dfdd58a22011e454872aacc7370f3e470afa40b | 7cec0bc03100c85ae0dc4f636b92ada46de06802 | /cdad/cdadmap/migrations/0032_auto_20150805_1132.py | 7df38e612401a9aaf367bbd84e02dd4c7db2eef7 | [
"MIT"
] | permissive | NiJeLorg/CDADMap | 200040b45510f7965fd1d772f7e9627561311e70 | 1f03dccf57951748155a0094a5aec3253183c412 | refs/heads/master | 2021-01-17T10:22:06.865934 | 2018-11-01T17:12:22 | 2018-11-01T17:12:22 | 27,398,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds a phone-number field (and a matching
    # privacy flag) to the CDAD survey panel model.
    dependencies = [
        ('cdadmap', '0031_auto_20150805_1124'),
    ]
    operations = [
        # Free-text phone number; defaults to empty string (not NULL).
        migrations.AddField(
            model_name='surveypanel',
            name='Social_Phone',
            field=models.CharField(default=b'', max_length=20),
            preserve_default=True,
        ),
        # Presumably True means "withhold the phone number from public
        # display" — NOTE(review): confirm against the app's display logic.
        migrations.AddField(
            model_name='surveypanel',
            name='Social_Phone_KeepPrivate',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
| [
"jd@nijel.org"
] | jd@nijel.org |
3fa45a0670a11d05a42e95d7cd0148e7b33ffd41 | 53c1eb6604f9e060bd6c9ce84395ab1a38d58f6f | /exercise/sorting.py | 94c49a421eddb34bd370dbeccbe9dbaebaa9a100 | [] | no_license | turo62/exercise | 543c684ef3dfe138a5f0d6976b7ff0d9c19553f0 | 3d8d8d8a12bb3885b3015eff0032cd977c02957e | refs/heads/master | 2020-04-14T18:10:31.224244 | 2019-01-03T18:10:55 | 2019-01-03T18:10:55 | 164,008,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # Sorting list of numbers entered by the user.
def get_input():
    """Prompt for whitespace-separated integers; return (count, list of ints)."""
    raw = input("Type in series of numbers with whitespaces. Push enter when complete!:")
    numbers = [int(token) for token in raw.split()]
    return len(numbers), numbers
def sort_nums(N, numbers):
    """Bubble-sort the first N elements of *numbers* in place, ascending.

    Returns the same list object, sorted.  The original always ran N full
    passes (and re-compared each just-swapped pair); this version shrinks
    the scanned range each pass and exits early once a pass makes no swap.
    """
    for end in range(N - 1, 0, -1):
        swapped = False
        for j in range(end):
            if numbers[j] > numbers[j + 1]:
                numbers[j], numbers[j + 1] = numbers[j + 1], numbers[j]
                swapped = True
        if not swapped:
            break  # already sorted; no need for further passes
    return numbers
def main():
    """Prompt for numbers, echo them, then print them sorted.

    The original called sort_nums() three times on the same list (the
    second and third calls re-sorted an already-sorted list); one call
    produces identical output.
    """
    N, numbers = get_input()
    print(numbers)
    print(sort_nums(N, numbers))
if __name__ == '__main__':
main()
| [
"turo62@gmail.com"
] | turo62@gmail.com |
37fe63054bc3e044a4ad349a1a55e8000c0e3c3c | 6a25d7f672c6276543d6d979b61337934557e702 | /test/imports.py | eda2d3118620601da931bf59b44aa1c63c7b5606 | [] | no_license | vsraptor/bi | bfc0bc436fb15d43dc303b948d376980085075b9 | 03b8ec4789592381c370a3c98114e4ba6f3d3fb6 | refs/heads/master | 2020-03-16T17:00:44.720209 | 2018-05-25T01:19:12 | 2018-05-25T01:19:12 | 132,814,076 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import sys, os
def import_lib() :
basedir = os.path.abspath(os.path.dirname(__file__))
for d in ['../lib/encoders', '../lib/cups', '../lib'] :
libdir = os.path.abspath(os.path.join(basedir, d));
sys.path.insert(0,libdir)
| [
"me@me.com"
] | me@me.com |
bbfd907f660bd78d2ae1a976c71f4132b01dc7b3 | 6d0b28f193bec15d979781740200d237fb13d3c1 | /apps/alertdb/migrations/0003_parameter.py | abd04a5853609f707f596f904a15937100cbf190 | [] | no_license | kelvinn/alerted-us-web | f612198c2cb59e79c2ab8386c4aa7c23861d203a | 8d0111b4ca4990cea94f6c96e88db2b1bb44a313 | refs/heads/master | 2021-09-27T00:45:49.630146 | 2020-11-19T19:46:17 | 2020-11-19T22:13:01 | 23,564,957 | 6 | 2 | null | 2021-09-22T17:38:37 | 2014-09-02T04:25:56 | Python | UTF-8 | Python | false | false | 836 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Parameter model, which stores
    # one name/value pair optionally attached to an alertdb.Info record.
    dependencies = [
        ('alertdb', '0002_area'),
    ]
    operations = [
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value_name', models.CharField(max_length=50)),
                ('value', models.CharField(max_length=500)),
                # Optional link back to the owning Info row; rows are removed
                # when the Info record is deleted (CASCADE).
                ('cap_info', models.ForeignKey(to_field='id', blank=True, to='alertdb.Info', null=True,
                                               on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| [
"kelvin@kelvinism.com"
] | kelvin@kelvinism.com |
eec4eb6256b94dce846461034d55fa43389f032b | 905e7882e7bb870c7de056687578d91789f26b4d | /pre-benchmarks/mike/performance/bm_fannkuch.py | 64d83e661d5d63a8940ba0444191ee299856bd1e | [] | no_license | glennneiger/retic_performance | 091d1749c07496c57e64a6b0ba4fd58b0e52bc45 | 025732be3a426e9188781d0f182918b2ba946dea | refs/heads/master | 2020-04-20T08:33:18.440975 | 2017-03-20T00:12:51 | 2017-03-20T00:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | # The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Sokolov Yura
# modified by Tupteq
import optparse
import time
import util
from compat import xrange
def fannkuch(n):
    """Return the maximum pancake-flip count over all permutations of 0..n-1.

    Classic "fannkuch" benchmark: for each permutation, repeatedly reverse
    the prefix of length (first element + 1) until the first element is 0,
    counting reversals; the answer is the largest count seen.
    """
    counts = list(range(1, n + 1))
    best = 0
    last = n - 1
    r = n
    printed = 0
    current = list(range(n))
    while True:
        # The reference program printed the first 30 permutations; only the
        # counter survives here, preserved to keep behavior identical.
        if printed < 30:
            printed += 1
        while r != 1:
            counts[r - 1] = r
            r -= 1
        # Permutations fixing position 0 or the last position cannot beat
        # the running maximum, so skip the flip loop for them.
        if current[0] != 0 and current[last] != last:
            work = current[:]
            flips = 0
            k = work[0]
            while k:
                work[:k + 1] = work[k::-1]
                flips += 1
                k = work[0]
            if flips > best:
                best = flips
        # Advance to the next permutation (rotate prefixes, counting down).
        while r != n:
            current.insert(r, current.pop(0))
            counts[r] -= 1
            if counts[r] > 0:
                break
            r += 1
        else:
            # All counters exhausted: every permutation has been visited.
            return best
DEFAULT_ARG = 9
def main(n, timer):
    """Time n runs of fannkuch(DEFAULT_ARG); return the per-run durations."""
    durations = []
    for _ in xrange(n):
        start = timer()
        fannkuch(DEFAULT_ARG)
        durations.append(timer() - start)
    return durations
if __name__ == "__main__":
    # Benchmark-harness entry point: parse the shared command-line options
    # and hand ``main`` to util.run_benchmark for timing.
    # NOTE(review): the description string says "Float benchmark" — looks
    # like a copy/paste leftover from another benchmark file.
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Float benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, main)
"migeed.z@outlook.com"
] | migeed.z@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.