blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8892a90c709b1fad91ec73e656344c01c93f3649 | 2ac169bf1294df6069db95e5362df2376844faec | /urllib3/backends/trio_backend.py | 0c2b016fe82561fd0a99cff7d4de0705657c6d91 | [
"MIT"
] | permissive | merrellb/urllib3 | a9000632f54a68cb1f4bd9ee9618e39e89f538b1 | dd9d52eb1e69227bb02d6a0fcbc2771e4c4e54fd | refs/heads/bleach-spike | 2021-01-16T19:37:28.619347 | 2017-08-04T21:51:38 | 2017-08-04T21:51:38 | 100,185,249 | 0 | 0 | null | 2017-08-13T15:15:17 | 2017-08-13T15:15:16 | null | UTF-8 | Python | false | false | 2,740 | py | import trio
from . import LoopAbort
from ._util import is_readable
class TrioBackend:
    async def connect(
            self, host, port, source_address=None, socket_options=None):
        """Open a TCP connection to host:port and wrap it in a TrioSocket.

        ``source_address`` is unsupported on this backend and raises
        NotImplementedError.  ``socket_options`` is an optional iterable of
        ``(level, optname, value)`` tuples applied to the new stream.
        """
        if source_address is not None:
            # You can't really combine source_address= and happy eyeballs
            # (can we get rid of source_address? or at least make it a source
            # ip, no port?)
            raise NotImplementedError(
                "trio backend doesn't support setting source_address")

        stream = await trio.open_tcp_stream(host, port)
        # Fixed: socket_options defaults to None, and iterating None raised
        # TypeError for every caller that omitted the argument.
        if socket_options is not None:
            for (level, optname, value) in socket_options:
                stream.setsockopt(level, optname, value)

        return TrioSocket(stream)
# XX it turns out that we don't need SSLStream to be robustified against
# cancellation, but we probably should do something to detect when the stream
# has been broken by cancellation (e.g. a timeout) and make is_readable return
# True so the connection won't be reused.
class TrioSocket:
    """Minimal socket-like wrapper around a trio Stream / SSLStream."""

    def __init__(self, stream):
        self._stream = stream

    async def start_tls(self, server_hostname, ssl_context):
        # Returns a *new* TrioSocket for the TLS layer; this object keeps
        # pointing at the plaintext transport.
        wrapped = trio.ssl.SSLStream(
            self._stream, ssl_context,
            server_hostname=server_hostname,
            https_compatible=True)
        return TrioSocket(wrapped)

    def getpeercert(self, binary=False):
        return self._stream.getpeercert(binary=binary)

    async def receive_some(self):
        # NOTE(review): BUFSIZE is assumed to be a module-level constant
        # defined elsewhere in this file -- confirm.
        return await self._stream.receive_some(BUFSIZE)

    async def send_and_receive_for_a_while(self, produce_bytes, consume_bytes):
        """Pump bytes both ways until produce_bytes returns None or the
        consumer raises LoopAbort.

        Fixed: the method was missing ``self`` in its signature while its
        sender closure used ``self._stream``, and the receiver referenced an
        undefined name ``stream``; both now use ``self._stream``.
        """
        async def sender():
            while True:
                outgoing = await produce_bytes()
                if outgoing is None:
                    break
                await self._stream.send_all(outgoing)

        async def receiver():
            while True:
                incoming = await self._stream.receive_some(BUFSIZE)
                consume_bytes(incoming)

        try:
            async with trio.open_nursery() as nursery:
                nursery.spawn(sender)
                nursery.spawn(receiver)
        except LoopAbort:
            pass

    def forceful_close(self):
        self._stream.forceful_close()

    def is_readable(self):
        # This is a bit of a hack, but I can't think of a better API that trio
        # *could* provide, since what we want to check here is such an odd
        # thing.
        sock_stream = self._stream
        # Strip off SSLStream wrappings
        while hasattr(sock_stream, "transport_stream"):
            sock_stream = sock_stream.transport_stream
        sock = sock_stream.socket
        # Delegates to the module-level is_readable() helper from ._util.
        return is_readable(sock)

    def set_readable_watch_state(self, enabled):
        pass
| [
"njs@pobox.com"
] | njs@pobox.com |
e75e43698a4d7ee8fab67e0ead070703180f9d66 | d83cd7bfec9c71ef3186546fd7423082415fca39 | /src/old/forums->JSON.py | 7b786d4da19838744f6d985dd71366cc646cf927 | [] | no_license | mac389/phikal | e139d7e20e6cda0fcedca9eb4a5a5ff397f5f49a | b5162a3bab7320ed3d67bb6b7c9c3a3f03c2ba5a | refs/heads/master | 2021-01-15T15:36:37.568339 | 2016-09-04T18:18:23 | 2016-09-04T18:18:23 | 56,150,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import json, os
db = {}
# Text mode: json.dump writes str objects, so the old 'wb' value made the
# final dump crash under Python 3.
WRITE = 'w'
forums_dir = os.path.join(os.getcwd(), 'forums')
for filename in os.listdir(forums_dir):
    db[filename] = {}
    # ``with`` closes each forum file deterministically instead of leaking
    # the handle (the original called open(...).read() and never closed it).
    with open(os.path.join(forums_dir, filename)) as forum_file:
        db[filename]['text'] = forum_file.read()
json.dump(db,open('db.json',WRITE)) | [
"mac389@gmail.com"
] | mac389@gmail.com |
eaa24b780fa830d12b2c79f659cfa00efdfda6ca | c2be187155aabf59a4c0d3f5065bc26239c0b827 | /master_category/converters/template_to_googlemanufacturer.py | b2cf214fc975e92734e471245aef63c2ddb66c20 | [] | no_license | dankCodeNugs/tmtext | 1d6c54f79399bfa5e6f3905c0f72ba0be59d8d0d | 8e2d834775f440def7f57294674b8109b46ee191 | refs/heads/master | 2023-03-16T08:49:38.456929 | 2016-12-20T19:45:54 | 2016-12-20T19:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,363 | py | import os
from lxml import html
import csv
import xlrd
from helper import check_extension, logging_info, write_to_file, convert_xls_file
#-------- define column names ------------
UPC = 'UPC'
MPN = 'MPN'
BRAND = 'Brand Name'
TITLE = 'Item Name'
GTIN = 'GTIN'
DESC = 'Description'
L_DESC = 'Long Description'
#-----------------------------------------
def convert_upc_to_gtin(upc):
    """Normalize a UPC (str, unicode or float) into a GTIN string.

    12-14 character codes are accepted as-is and 8 character (UPC-E style)
    codes are zero-padded to 12 digits.  Any other length yields ''.
    """
    s_upc = u''
    if type(upc) == str or type(upc) == unicode:
        s_upc = upc
    elif type(upc) == float:
        # xlrd hands numeric cells back as floats; render without decimals.
        s_upc = u'%.f' % upc
    gtin_code = u''
    # Fixed: the original test was ``len <= 14 or len >= 12`` which is true
    # for *every* length; valid GTINs here are 12-14 characters.
    if 12 <= len(s_upc) <= 14:
        gtin_code = s_upc
    if len(s_upc) == 8:
        gtin_code = u'0000' + s_upc
    return gtin_code
def generate_bullets(desc):
    """Split an HTML description into a list of bullet-point strings.

    Tries, in order, Amazon feed feature bullets, generic <ul><li> items and
    Walmart-style <p> paragraphs; the first selector that matches *any* node
    wins (even if all its texts are empty, matching the original behaviour).
    An empty description yields [] and a description with no recognised
    structure is returned whole as a single bullet.
    """
    if desc == '':
        return []
    tree_description = html.fromstring(desc)
    # The three selector attempts below used to be three copy-pasted blocks;
    # they are now a single loop with identical semantics.
    bullet_xpaths = (
        # --------- Description / CSV
        "//*[contains(@id,'feature-bullets')]//ul/"
        "li[not(contains(@class,'hidden'))]",
        # --------- Long Description / Amazon
        "//ul/li",
        # --------- Long Description / Walmart
        "//p",
    )
    for xpath in bullet_xpaths:
        tree_bullets = tree_description.xpath(xpath)
        try:
            bullet_points = [b.text_content().strip() for b in tree_bullets
                             if b.text_content().strip() != '']
        except Exception as e:
            # Preserve original behaviour: log and fall back to an empty
            # list, but still return it when the selector matched nodes.
            bullet_points = []
            logging_info('Bullet parse error')
        if len(tree_bullets) > 0:
            return bullet_points
    return [desc]
def parse_xls_value(val):
    """Render xlrd float cells as integer-looking strings; pass others through."""
    return '%.f' % val if type(val) == float else val
def generate_google_manufacturer_xml(template_env, input_file):
    """Render a Google Manufacturer XML feed from a .csv or .xls item file.

    Each data row is mapped onto a dict (id/brand/title/gtin/mpn/description/
    bullet_points) using the header row to locate columns, then the rows are
    fed to the ``GoogleManufacturer.html`` Jinja template.  Progress and the
    result path are reported through ``logging_info``; errors abort early.
    """
    available_extensions = ['.csv', '.xls']
    items = []
    context = {}
    if not check_extension(input_file, available_extensions):
        logging_info('The file extension should be %s.'
                     % (','.join(available_extensions)), 'ERROR')
        return
    try:
        name, file_extension = os.path.splitext(input_file)
        ci = {}
        # The ci will take column index like this
        # {
        #     'MPN': -1,  # The column index of the MPN field
        #     'Brand Name': -1,
        #     'Item Name': -1,
        #     'GTIN': -1,
        #     'Description': -1,
        #     'Long Description': -1
        # }
        if file_extension == '.csv':
            with open(input_file, 'rU') as csvfile:
                reader = csv.reader(csvfile)
                for idx, item in enumerate(reader):
                    if idx == 0:
                        # Header row: remember where each named column sits.
                        for i, c in enumerate(item):
                            ci[c] = i
                    else:
                        data = {
                            'id': item[ci[MPN]] if ci[MPN] > -1 else '',
                            'brand': item[ci[BRAND]] if ci[BRAND] > -1 else '',
                            'title': item[ci[TITLE]] if ci[TITLE] > -1 else '',
                            'gtin': item[ci[GTIN]] if ci[GTIN] > -1 else '',
                            'mpn': item[ci[MPN]] if ci[MPN] > -1 else '',
                            'description': item[ci[DESC]] if ci[DESC] > -1 else '',
                            'bullet_points': item[ci[L_DESC]] if ci[L_DESC] > -1 else '',
                        }
                        data['bullet_points'] = generate_bullets(data['bullet_points'])
                        if data['gtin'] == '':
                            # No GTIN column value: derive one from the UPC.
                            if ci[UPC] > -1 and item[ci[UPC]] != '':
                                data['gtin'] = convert_upc_to_gtin(item[ci[UPC]])
                        items.append(data)
        else:  # .xls file
            logging_info('START CONVERSION')
            # xlrd could not read xls file that generated by PHPExcel
            # so we make file conversion
            input_file_c = convert_xls_file(input_file)
            logging_info('END CONVERSION')
            if input_file_c == '':
                raise Exception('Could not convert xml file')
            wb = xlrd.open_workbook(filename=input_file_c)
            s_names = wb.sheet_names()
            for sn in s_names:
                item_sheet = wb.sheet_by_name(sn)
                for idx, row in enumerate(item_sheet.get_rows()):
                    if idx == 0:
                        # Header row; xlrd cells expose their text via .value.
                        for i, c in enumerate(row):
                            ci[c.value] = i
                    else:
                        data = {
                            'id': parse_xls_value(row[ci[MPN]].value) if ci[MPN] > -1 else '',
                            'brand': row[ci[BRAND]].value if ci[BRAND] > -1 else '',
                            'title': row[ci[TITLE]].value if ci[TITLE] > -1 else '',
                            'gtin': parse_xls_value(row[ci[GTIN]].value) if ci[GTIN] > -1 else '',
                            'mpn': parse_xls_value(row[ci[MPN]].value) if ci[MPN] > -1 else '',
                            'description': row[ci[DESC]].value if ci[DESC] > -1 else '',
                            'bullet_points': row[ci[L_DESC]].value if ci[L_DESC] > -1 else '',
                        }
                        data['bullet_points'] = generate_bullets(data['bullet_points'])
                        if data['gtin'] == '':
                            if ci[UPC] > -1 and row[ci[UPC]] != '':
                                data['gtin'] = convert_upc_to_gtin(row[ci[UPC]].value)
                        items.append(data)
    except Exception as e:
        logging_info(str(e), 'ERROR')
        return
    context['items'] = items
    template = template_env.get_template('GoogleManufacturer.html')
    output_content = template.render(context).encode('utf-8')
    filename = write_to_file(output_content)
    logging_info(filename, 'RESULT_FILE')
logging_info('google-manufacturer.xml', 'FILE_NAME') | [
"life.long.learner127@outlook.com"
] | life.long.learner127@outlook.com |
80a4b87e6030c74944a0822f7667bbfb7fd38fa7 | d5a5ee613027429ed654dbae11d3a18af6c379f8 | /timber_modisette/pythonDjango/dojosecrets/apps/dojo_secrets/models.py | 5ffb2d167ea093833f5a5a773c29443a4868d1b3 | [] | no_license | husainun/python_april_2017 | 1c3d04792bfe50f57f0f3238c06dca49c605f4b1 | 633f3451ae99eaa94a97fdf2647de38db03e49be | refs/heads/master | 2020-05-15T09:22:06.794597 | 2017-06-13T15:35:22 | 2017-06-13T15:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | from __future__ import unicode_literals
from django.db import models
import re
# Create your models here.
class UserManager(models.Manager):
    def validateUser(self, post):
        """Validate a registration POST dict.

        Returns ``(is_valid, errors)`` where ``errors`` collects one message
        per failed check (names non-empty, email well-formed and unused,
        password >= 6 chars and matching its confirmation).
        """
        is_valid = True
        errors = []
        if len(post.get('first_name')) == 0:
            is_valid = False
            errors.append("Must enter valid first name")
        if len(post.get('last_name')) == 0:
            is_valid = False
            errors.append("Must enter valid last name")
        if not re.search(r'\w+\@+\w+\.\w+', post.get('email')):
            is_valid = False
            errors.append("Please enter a valid email address")
        # .exists() issues a cheap EXISTS query instead of fetching a full
        # row just to compare it against None.
        if User.objects.filter(email=post.get('email')).exists():
            is_valid = False
            errors.append("email already exists")
        if len(post.get('password')) < 6:
            is_valid = False
            errors.append("please enter a password of at least 6 characters")
        if post.get('password') != post.get('cf_password'):
            is_valid = False
            errors.append("passwords do not match")
        return (is_valid, errors)
class User(models.Model):
    # Registered site user.  The password is stored exactly as entered --
    # there is no hashing in this model layer.
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    email = models.EmailField(max_length=200)
    password = models.CharField(max_length=100)
    # NOTE(review): persisting the confirmation password duplicates
    # ``password``; confirm whether this column is actually needed.
    cf_password = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()  # custom manager providing validateUser()
class Post(models.Model):
    # A user-authored post; ``likes`` is many-to-many so any user can like
    # any number of posts (reverse accessors: user.posts / user.liked_posts).
    content = models.TextField(max_length=1000)
    user = models.ForeignKey(User, related_name='posts')
    likes = models.ManyToManyField(User, related_name='liked_posts')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
| [
"mister.modistette@gmail.com"
] | mister.modistette@gmail.com |
7b903bffae937420e3d65bd50a9afd654f013f43 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/abresler/super_simple_twitter_scraper_8.py | f3b967c560a5cccaf86c33bbefe8ff74a186e952 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | ###################################################################################
# Super Simple Twitter Scraper
###################################################################################
import scraperwiki
import simplejson
import urllib2
import csv
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'korean and delicious'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 10
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Super Simple Twitter Scraper
###################################################################################
import scraperwiki
import simplejson
import urllib2
import csv
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'korean and delicious'
RESULTS_PER_PAGE = '100'
LANGUAGE = ''
NUM_PAGES = 10
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
af0251879607882ceeffe19420e0ab47b8a7b0c4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03804/s510368755.py | 7c3e304c260b97d80771fc571df678da7b42baf3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | n, m = map(int, input().split())
a = [input() for _ in range(n)]
b = [input() for _ in range(m)]

# Slide the m x m pattern ``b`` over every feasible top-left offset of the
# n x n grid ``a``; answer Yes on the first complete match.
matched = any(
    all(a[i + dx][j + dy] == b[i][j] for i in range(m) for j in range(m))
    for dx in range(0, n - m + 1)
    for dy in range(0, n - m + 1)
)
print('Yes' if matched else 'No')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e88c846218f4fd40e81aa568be49a6bde82586ad | 1626e16760c9c5b5dc9bd7c345871c716d5ffd99 | /Problems/0800_0899/0861_Score_After_Flipping_Matrix/Project_Python3/Score_After_Flipping_Matrix.py | 528676c4fee1db599a5bb4afb91bf8dba6f59098 | [] | no_license | NobuyukiInoue/LeetCode | 94ddb19e63cb8d0775cdc13f311fe90c87a1d718 | 3f0ffd519404165fd1a735441b212c801fd1ad1e | refs/heads/master | 2023-09-01T07:38:50.939942 | 2023-08-23T09:51:17 | 2023-08-23T09:51:17 | 158,100,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | # coding: utf-8
import os
import sys
import time
from typing import List, Dict, Tuple
class Solution:
    def matrixScore(self, A: List[List[int]]) -> int:
        """Maximum matrix score after optimal row/column flips (LeetCode 861)."""
        rows, cols = len(A), len(A[0])
        # Flipping each row so its leading bit is 1 is always optimal; that
        # contributes rows * 2^(cols-1) up front.
        total = rows << (cols - 1)
        for col in range(1, cols):
            # After the row flips, this column holds A[i][col] when the row
            # started with 1, and its complement otherwise.
            ones = sum(row[col] if row[0] == 1 else row[col] ^ 1 for row in A)
            # A column flip is free to keep whichever bit is more common.
            total += max(ones, rows - ones) << (cols - 1 - col)
        return total
def printGrid(title, grid):
    """Pretty-print a 2-D int grid in the test-data bracket notation."""
    print("{0} = [".format(title))
    for row_idx, row in enumerate(grid):
        prefix = " [" if row_idx == 0 else ",["
        cells = ",".join("{0:d}".format(cell) for cell in row)
        print(prefix + cells + "]")
    print("]")
def printResult(title, result):
    """Print a 1-D result list, one element per line, bracket-delimited."""
    print("{0} = [".format(title))
    for element in result:
        print(element)
    print("]")
def main():
    """CLI entry point: run loop_main on each non-blank line of a test file."""
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        print("Usage: python {0} <testdata.txt>".format(argv[0]))
        exit(0)
    if not os.path.exists(argv[1]):
        print("{0} not found...".format(argv[1]))
        exit(0)
    # Fixed: the test-data file handle was opened and never closed; ``with``
    # guarantees it is released.
    with open(argv[1], "r") as testDataFile:
        lines = testDataFile.readlines()
    for temp in lines:
        temp = temp.strip()
        if temp == "":
            continue
        print("args = {0}".format(temp))
        loop_main(temp)
        # print("Hit Return to continue...")
        # input()
def loop_main(temp):
    """Parse one "[[..],[..]]" test line, run matrixScore and report timing."""
    cleaned = temp.replace(" ", "").replace("\"", "")
    cleaned = cleaned.replace("[[", "").replace("]]", "").rstrip()
    # Rows are separated by "],[" once the outer brackets are stripped.
    A = [[int(cell) for cell in row.split(",")] for row in cleaned.split("],[")]
    printGrid("A", A)
    started = time.time()
    result = Solution().matrixScore(A)
    elapsed = time.time() - started
    print("result = {0:d}".format(result))
    print("Execute time ... : {0:f}[s]\n".format(elapsed))
if __name__ == "__main__":
main()
| [
"gx3n-inue@asahi-net.or.jp"
] | gx3n-inue@asahi-net.or.jp |
256dba10a1f9e4655c2d1f4b3024442dc6b1f1f1 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /sts_write_3/role-with-saml_assume.py | bd754cf1c2a82bb42705ba084e7a21c5716a333e | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys

# Make the repository root importable so ``common`` resolves when this
# script is executed directly.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sts/assume-role-with-saml.html
if __name__ == '__main__':
    """
    """
    # Help text shown to the user describing the three required parameters.
    parameter_display_string = """
# role-arn : The Amazon Resource Name (ARN) of the role that the caller is assuming.
# principal-arn : The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the IdP.
# saml-assertion : The base-64 encoded SAML authentication response provided by the IdP.
For more information, see Configuring a Relying Party and Adding Claims in the IAM User Guide .
"""
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    write_three_parameter("sts", "assume-role-with-saml", "role-arn", "principal-arn", "saml-assertion", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
e328506e24b558eac711bd04cb331a655324fecc | ad99cf70a4ab84e0eb3971718147869c639e8976 | /backend/menu/api/v1/viewsets.py | 31633f37a05075e119710cd97bb2f559e1407d28 | [] | no_license | crowdbotics-apps/ellensburg-delivery-22553 | 60d3caf6ed99543a0e8362546f33b0d1bb07c825 | 3f7a5d1d4af602445a2715ca06fc3e327b9297da | refs/heads/master | 2023-01-12T09:07:08.250685 | 2020-11-12T08:14:21 | 2020-11-12T08:14:21 | 312,209,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | from rest_framework import authentication
from menu.models import Category, Country, Item, ItemVariant, Review
from .serializers import (
CategorySerializer,
CountrySerializer,
ItemSerializer,
ItemVariantSerializer,
ReviewSerializer,
)
from rest_framework import viewsets
class ItemViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for menu Items (session or token auth)."""
    serializer_class = ItemSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Item.objects.all()


class ReviewViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for item Reviews (session or token auth)."""
    serializer_class = ReviewSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Review.objects.all()


class CountryViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Countries (session or token auth)."""
    serializer_class = CountrySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Country.objects.all()


class ItemVariantViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for ItemVariants (session or token auth)."""
    serializer_class = ItemVariantSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = ItemVariant.objects.all()


class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for Categories (session or token auth)."""
    serializer_class = CategorySerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Category.objects.all()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
437b658890538d63a1cdda99d98e43a01604f019 | fff4e1431391956292afe5514b03f81035a3f402 | /GAN/GAN.py | b6e5893f41e42b03e882097c4acf7ad3f796facb | [] | no_license | recomdDN/DL_practice | b1d0172bc91a735a32ae01379965247bc9b71a91 | 5addd8629533b7547a62f68c518138af48e174f2 | refs/heads/master | 2020-04-16T23:02:12.324842 | 2019-02-18T09:29:53 | 2019-02-18T09:29:53 | 165,995,401 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,430 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
from skimage.io import imsave
import os
import shutil
img_width = 28
img_height = 28
img_size = img_height * img_width
to_train = True
to_restore = False
output_path = 'output'
#总迭代次数500次
max_epoch = 500
h1_size = 150
h2_size = 300
z_size = 100
batch_size = 256
def build_generator(z_prior):
    """Generator MLP: latent batch z -> tanh image vector in [-1, 1].

    Two ReLU hidden layers (h1_size, h2_size) feed an img_size tanh output.
    Returns (x_generate, g_params); g_params lists the trainable variables
    so the optimizer can update only the generator's weights.
    """
    w1 = tf.Variable(tf.truncated_normal([z_size,h1_size],stddev=0.1),name='g_w1',dtype=tf.float32)
    b1 = tf.Variable(tf.zeros([h1_size]),name='g_b1',dtype=tf.float32)
    h1 = tf.nn.relu(tf.matmul(z_prior,w1) + b1)
    w2 = tf.Variable(tf.truncated_normal([h1_size,h2_size],stddev=0.1),name='g_w2',dtype=tf.float32)
    b2 = tf.Variable(tf.zeros([h2_size]),name='g_b2',dtype=tf.float32)
    h2 = tf.nn.relu(tf.matmul(h1,w2)+b2)
    w3 = tf.Variable(tf.truncated_normal([h2_size,img_size],stddev=0.1),name='g_w3',dtype=tf.float32)
    b3 = tf.Variable(tf.zeros([img_size]),name='g_b3',dtype=tf.float32)
    h3 = tf.matmul(h2,w3)+b3
    # tanh keeps the fake images in [-1, 1], matching the rescaled MNIST
    # inputs fed to the discriminator.
    x_generate = tf.nn.tanh(h3)
    g_params = [w1,b1,w2,b2,w3,b3]
    return x_generate,g_params
def build_discriminator(x_data,x_generated,keep_prob):
    """Discriminator MLP shared by the real and generated batches.

    The real batch and the generated batch are concatenated into one forward
    pass; the sigmoid scores are then split back apart with tf.slice.
    Returns (y_data, y_generated, d_params).
    """
    # Stack the real batch on top of the generated batch (axis 0).
    x_in = tf.concat([x_data,x_generated],0)
    w1 = tf.Variable(tf.truncated_normal([img_size,h2_size],stddev=0.1),name='d_w1',dtype=tf.float32)
    b1 = tf.Variable(tf.zeros([h2_size]),name='d_b1',dtype=tf.float32)
    h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x_in,w1)+b1),keep_prob)
    w2 = tf.Variable(tf.truncated_normal([h2_size,h1_size],stddev=0.1),name='d_w2',dtype=tf.float32)
    b2 = tf.Variable(tf.zeros([h1_size]),name='d_b2',dtype=tf.float32)
    h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(h1,w2)+b2),keep_prob)
    w3 = tf.Variable(tf.truncated_normal([h1_size,1]),name='d_w3',dtype=tf.float32)
    b3 = tf.Variable(tf.zeros([1]),name='d_b3',dtype=tf.float32)
    h3 = tf.matmul(h2,w3)+b3
    # (Original Chinese note, translated:) tf.slice(inputs, begin, size)
    # extracts a sub-tensor: begin[i] is the offset along dimension i and
    # size[i] the number of elements to take, where -1 means "to the end".
    """
    1,函数原型 tf.slice(inputs,begin,size,name='')
    2,用途:从inputs中抽取部分内容
    inputs:可以是list,array,tensor
    begin:n维列表,begin[i] 表示从inputs中第i维抽取数据时,相对0的起始偏移量,也就是从第i维的begin[i]开始抽取数据
    size:n维列表,size[i]表示要抽取的第i维元素的数目
    有几个关系式如下:
    (1) i in [0,n]
    (2)tf.shape(inputs)[0]=len(begin)=len(size)
    (3)begin[i]>=0 抽取第i维元素的起始位置要大于等于0
    (4)begin[i]+size[i]<=tf.shape(inputs)[i]
    """
    # (Translated:) h3 has shape [batch_size + batch_size, 1]; y_data holds
    # the scores for the real images and y_generated those for the fakes.
    """
    h3的size:[batch_size + batch_size,1]
    所以 y_data 是对 real img的判别结果
    y_generated 是对 generated img 的判别结果
    """
    y_data = tf.nn.sigmoid(tf.slice(h3,[0,0],[batch_size,-1],name=None))
    y_generated = tf.nn.sigmoid(tf.slice(h3,[batch_size,0],[-1,-1],name=None))
    d_params = [w1,b1,w2,b2,w3,b3]
    return y_data,y_generated,d_params
def show_result(batch_res, fname, grid_size=(8, 8), grid_pad=5):
    """Tile a batch of generated [-1, 1] image vectors into one image file.

    Fixed: the previous default grid_size of (0, 0) made every call that
    omitted the argument (as train() does) request a canvas of negative
    dimensions, which np.zeros rejects; an 8x8 grid (64 samples) is a sane
    default for the 256-image batches produced here.
    """
    # Map tanh output from [-1, 1] back to [0, 1] and restore 2-D images.
    batch_res = 0.5 * batch_res.reshape((batch_res.shape[0], img_height, img_width)) + 0.5
    img_h, img_w = batch_res.shape[1], batch_res.shape[2]
    grid_h = img_h * grid_size[0] + grid_pad * (grid_size[0] - 1)
    grid_w = img_w * grid_size[1] + grid_pad * (grid_size[1] - 1)
    img_grid = np.zeros((grid_h, grid_w), dtype=np.uint8)
    for i, res in enumerate(batch_res):
        if i >= grid_size[0] * grid_size[1]:
            break
        img = (res) * 255
        img = img.astype(np.uint8)
        # NOTE(review): row uses grid_size[0] and col grid_size[1]; these
        # only coincide for square grids -- confirm before using non-square
        # grid sizes.
        row = (i // grid_size[0]) * (img_h + grid_pad)
        col = (i % grid_size[1]) * (img_w + grid_pad)
        img_grid[row:row + img_h, col:col + img_w] = img
    imsave(fname, img_grid)
def train():
    """Build the GAN graph and run the adversarial training loop on MNIST.

    Alternates a discriminator update and a generator update per batch,
    writes two sample image grids per epoch (one from a fixed latent batch,
    one from a fresh random batch) and checkpoints the session into
    ``output_path``.
    """
    # load data
    mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
    x_data = tf.placeholder(tf.float32,[None,img_size],name='x_data')
    z_prior = tf.placeholder(tf.float32,[None,z_size],name='z_prior')
    keep_prob = tf.placeholder(tf.float32,name='keep_prob')
    global_step = tf.Variable(0,name="global_step",trainable=False)
    x_generated,g_params = build_generator(z_prior)
    y_data,y_generated,d_params = build_discriminator(x_data,x_generated,keep_prob)
    # Minimax GAN losses: the discriminator pushes y_data -> 1 and
    # y_generated -> 0; the generator pushes y_generated -> 1.
    d_loss =-( tf.log(y_data) + tf.log(1-y_generated))
    g_loss = -(tf.log(y_generated))
    optimizer = tf.train.AdamOptimizer(0.0001)
    # Each trainer only touches its own sub-network's variables.
    d_trainer= optimizer.minimize(d_loss,var_list=d_params)
    g_trainer = optimizer.minimize(g_loss,var_list=g_params)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        if to_restore:
            chkpt_fname = tf.train.latest_checkpoint(output_path)
            saver.restore(sess, chkpt_fname)
        else:
            # Fresh run: wipe and recreate the output directory.
            if os.path.exists(output_path):
                shutil.rmtree(output_path)
            os.mkdir(output_path)
        # Fixed latent batch so per-epoch sample grids are comparable.
        z_sample_val = np.random.normal(0,1,size=(batch_size,z_size)).astype(np.float32)
        # NOTE(review): under Python 3 this is a float (60000 / 256); the
        # np.arange below still works but yields float step indices.
        steps = 60000 / batch_size
        for i in range(sess.run(global_step),max_epoch):
            for j in np.arange(steps):
                print("epoch:%s, iter:%s" % (i, j))
                # (Translated:) each step loads 256 training samples and
                # performs one optimization step.
                x_value, _ = mnist.train.next_batch(batch_size)
                # Rescale MNIST pixels from [0, 1] to the generator's tanh
                # range [-1, 1].
                x_value = 2 * x_value.astype(np.float32) - 1
                z_value = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
                # Discriminator update.
                sess.run(d_trainer,
                         feed_dict={x_data: x_value, z_prior: z_value, keep_prob: 0.7})
                # Generator update (j % 1 == 0 is always true, so this runs
                # every step).
                if j % 1 == 0:
                    sess.run(g_trainer,
                             feed_dict={x_data: x_value, z_prior: z_value, keep_prob: 0.7})
            x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_sample_val})
            show_result(x_gen_val, "output/sample{0}.jpg".format(i))
            z_random_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
            x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_random_sample_val})
            show_result(x_gen_val, "output/random_sample{0}.jpg".format(i))
            sess.run(tf.assign(global_step, i + 1))
            saver.save(sess, os.path.join(output_path, "model"), global_step=global_step)
train() | [
"593476874@qq.com"
] | 593476874@qq.com |
d3cc50f4cabcb9c5c39980932845050fda43a332 | 4a1336d6d3de077aab654bd315ddeb4cf261256f | /spider/headerchange/headerchange/spiders/headervalidation.py | 8ced065d76f34ea2e30825c27194917c00b92c1b | [
"Apache-2.0"
] | permissive | barneyElDinosaurio/base_function | e84be6da0fc2681421cefdbb6792dfda74031fce | d4ecbd4e8dfe43ed9c02ac430ce4598bfca0fc25 | refs/heads/master | 2020-04-04T20:35:48.477181 | 2018-11-05T10:34:34 | 2018-11-05T10:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # -*- coding: utf-8 -*-
import scrapy
from headerchange.user_agents import agents
import random
import json
class HeadervalidationSpider(scrapy.Spider):
    """Spider that hits httpbin.org/ip five times to inspect our requests.

    Used to check that this project's header-rotation middleware (see
    ``headerchange.user_agents``) is applied; the commented-out line in
    parse() printed the echoed User-Agent instead of the IP payload.
    """
    name = 'headervalidation'
    def start_requests(self):
        # dont_filter=True lets the identical URL be scheduled all 5 times
        # despite scrapy's duplicate-request filter.
        url='http://httpbin.org/ip'
        for i in range(5):
            yield scrapy.Request(url=url,dont_filter=True)
    def parse(self, response):
        print('*'*20)
        print(response.text)
        # print(json.loads(response.body_as_unicode()).get('headers').get('User-Agent'))
        print('*'*20)
| [
"jinweizsu@gmail.com"
] | jinweizsu@gmail.com |
43f1604b8b228c0f2c7e723d31a650b9d2e13cc0 | e20978855798f958a460b318c247d80515edbf8e | /Node.py | c1eb13f840cd772767fb1d467922f602bdb64d46 | [] | no_license | hanifsarwary/ThesisALgo | 846df062524fc62718e6bf45aafa2ce895d98dfd | 01184740c139099055e290bb9a14454b389f8bd9 | refs/heads/master | 2020-07-12T07:54:09.153225 | 2019-09-09T13:05:16 | 2019-09-09T13:05:16 | 204,759,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | class Node:
    def __init__(self, name, parent=None, children=None, is_mendatory=None, is_alternative=None, is_or=None):
        """Create a feature-tree node.

        ``parent`` is None for the root; ``children`` is managed by the
        caller.  At most one of the three relation flags is expected to be
        truthy (see __str__, which reports only the first set flag).
        """
        self.name = name
        self.parent = parent
        self.children = children
        self.is_mendatory = is_mendatory  # (sic) spelling kept for callers
        self.is_alternative = is_alternative
        self.is_or = is_or
def __str__(self):
stri = self.name +" child of " + self.parent.name
if self.is_mendatory:
stri+= " it is mandetory"
elif self.is_alternative:
stri += " it is alternative"
elif self.is_or:
stri += " it is or"
return stri | [
"mianhanif13@gmail.com"
] | mianhanif13@gmail.com |
185854112af879ccfa14120f7425b4b1445cb903 | 91c204ce2b5c8793effdcb5a8e958536e4fa253f | /main/migrations/0020_cult_date_created.py | c41a1a0d1d962a8225847ae9fd4a990af925f662 | [
"MIT"
] | permissive | send2cloud/opencult.com | eb20bdecbaee741e43831cbeb430fec73d45947f | 22888ab7f66d577ec8940c8b1f45a1412509389b | refs/heads/master | 2021-04-26T23:21:58.349125 | 2018-03-04T19:53:08 | 2018-03-04T19:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.0.1 on 2018-02-10 15:37
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add ``Cult.date_created``.

    The field defaults to ``timezone.now`` so existing rows receive a
    timestamp when the column is added.
    """

    dependencies = [
        ('main', '0019_auto_20180210_1402'),
    ]

    operations = [
        migrations.AddField(
            model_name='cult',
            name='date_created',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"theodorekeloglou@gmail.com"
] | theodorekeloglou@gmail.com |
f7d6097ad8e36722971a2dee7fe0e8bb7ba02e76 | ed2ea3c4bb8809b8aad0977f193f265478696a25 | /ldap_sync/concepts/__init__.py | f0428145b5ba089ae875216a6ae92016a3e406fa | [
"Apache-2.0"
] | permissive | agdsn/pycroft | 4d40f0f2b61ec48fb1ed9df7f5227e59e5e2b946 | 9f3abb5dc1a7dd54c577af37d5004dd2204739cd | refs/heads/develop | 2023-08-31T10:52:15.658921 | 2023-08-27T12:53:37 | 2023-08-27T12:53:37 | 56,711,411 | 21 | 15 | Apache-2.0 | 2023-09-05T00:51:57 | 2016-04-20T18:36:36 | Python | UTF-8 | Python | false | false | 257 | py | # Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
"""
ldap_sync.concepts
~~~~~~~~~~~~~~~~~~
"""
| [
"lukas.juhrich@agdsn.de"
] | lukas.juhrich@agdsn.de |
be6728759401ccad5796401b07e2d4c3d04d2771 | b2f4d7275b407b56309d8e5ede3b1c75a18c7871 | /MxOnline/apps/courses/migrations/0006_bannercorse.py | 11d3e518cb1f0fde38dc95280b2de831c0269532 | [] | no_license | renlei-great/MxOnline | 9157459cea21bb20379dbe5d285f4aecac026090 | 01454796473cf2fbb10be315e8340bda715159b2 | refs/heads/master | 2021-01-09T14:47:54.530942 | 2020-05-09T16:37:33 | 2020-05-09T16:37:33 | 242,342,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 2.2 on 2020-03-20 21:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0005_course_is_banner'),
]
operations = [
migrations.CreateModel(
name='BannerCorse',
fields=[
],
options={
'verbose_name': '课程轮播',
'verbose_name_plural': '课程轮播',
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('courses.course',),
),
]
| [
"1415977534@qq.com"
] | 1415977534@qq.com |
e9dd6b17803be564c025bd5076ef9dfa8fbff2ec | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/79/usersdata/157/43823/submittedfiles/serie1.py | 876643f429b71443f76ea7727c5a88460859f3ce | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | n=int(input('Informe um valor:'))
s=0
for i in range (1,n+1,1):
if (i%2==1):
s=s+(i**2))
else:
s=s-(i/(i**2))
print('%.5f'%s) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
21e6f7a73ae5b4b0a60276196bd64a2f0b5af5f4 | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/basics/converting_to_int_via_float.py | e4a380c6955ace2a5bc7d9e1ebeb9c41f61efb5d | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 385 | py | a = "2.1"
b = float(a)
c = int(b)
print(c) # 2
print( type(a) ) # <class 'str'>
print( type(b) ) # <class 'float'>
print( type(c) ) # <class 'int'>
d = int( float(a) )
print(d) # 2
print( type(d) ) # <class 'int'>
print( int( float(2.1) )) # 2
print( int( float("2") )) # 2
print( int( float(2) )) # 2
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
11905ea741213e9e24040446860c1f72c54dec7a | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/jedi/inference/flow_analysis.py | 89bfe578dfcb4dd2770c6218ed32d374639910fb | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 4,583 | py | from typing import Dict, Optional
from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
from jedi.inference.recursion import execution_allowed
from jedi.inference.helpers import is_big_annoying_library
class Status:
lookup_table: Dict[Optional[bool], 'Status'] = {}
def __init__(self, value: Optional[bool], name: str) -> None:
self._value = value
self._name = name
Status.lookup_table[value] = self
def invert(self):
if self is REACHABLE:
return UNREACHABLE
elif self is UNREACHABLE:
return REACHABLE
else:
return UNSURE
def __and__(self, other):
if UNSURE in (self, other):
return UNSURE
else:
return REACHABLE if self._value and other._value else UNREACHABLE
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name)
REACHABLE = Status(True, 'reachable')
UNREACHABLE = Status(False, 'unreachable')
UNSURE = Status(None, 'unsure')
def _get_flow_scopes(node):
while True:
node = get_parent_scope(node, include_flows=True)
if node is None or is_scope(node):
return
yield node
def reachability_check(context, value_scope, node, origin_scope=None):
if is_big_annoying_library(context) \
or not context.inference_state.flow_analysis_enabled:
return UNSURE
first_flow_scope = get_parent_scope(node, include_flows=True)
if origin_scope is not None:
origin_flow_scopes = list(_get_flow_scopes(origin_scope))
node_flow_scopes = list(_get_flow_scopes(node))
branch_matches = True
for flow_scope in origin_flow_scopes:
if flow_scope in node_flow_scopes:
node_keyword = get_flow_branch_keyword(flow_scope, node)
origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope)
branch_matches = node_keyword == origin_keyword
if flow_scope.type == 'if_stmt':
if not branch_matches:
return UNREACHABLE
elif flow_scope.type == 'try_stmt':
if not branch_matches and origin_keyword == 'else' \
and node_keyword == 'except':
return UNREACHABLE
if branch_matches:
break
# Direct parents get resolved, we filter scopes that are separate
# branches. This makes sense for autocompletion and static analysis.
# For actual Python it doesn't matter, because we're talking about
# potentially unreachable code.
# e.g. `if 0:` would cause all name lookup within the flow make
# unaccessible. This is not a "problem" in Python, because the code is
# never called. In Jedi though, we still want to infer types.
while origin_scope is not None:
if first_flow_scope == origin_scope and branch_matches:
return REACHABLE
origin_scope = origin_scope.parent
return _break_check(context, value_scope, first_flow_scope, node)
def _break_check(context, value_scope, flow_scope, node):
reachable = REACHABLE
if flow_scope.type == 'if_stmt':
if flow_scope.is_node_after_else(node):
for check_node in flow_scope.get_test_nodes():
reachable = _check_if(context, check_node)
if reachable in (REACHABLE, UNSURE):
break
reachable = reachable.invert()
else:
flow_node = flow_scope.get_corresponding_test_node(node)
if flow_node is not None:
reachable = _check_if(context, flow_node)
elif flow_scope.type in ('try_stmt', 'while_stmt'):
return UNSURE
# Only reachable branches need to be examined further.
if reachable in (UNREACHABLE, UNSURE):
return reachable
if value_scope != flow_scope and value_scope != flow_scope.parent:
flow_scope = get_parent_scope(flow_scope, include_flows=True)
return reachable & _break_check(context, value_scope, flow_scope, node)
else:
return reachable
def _check_if(context, node):
with execution_allowed(context.inference_state, node) as allowed:
if not allowed:
return UNSURE
types = context.infer_node(node)
values = set(x.py__bool__() for x in types)
if len(values) == 1:
return Status.lookup_table[values.pop()]
else:
return UNSURE
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
9a79fa81dfbdd295757923dad43e8a3a4edf1a0c | 022104aa2456429356bdd26c701a2949381a83cf | /build/campero/campero_common/campero_pad/catkin_generated/pkg.develspace.context.pc.py | f99ca17b449a9e448c76c1b55c6efcc0f936148a | [] | no_license | nachocz/campero_ws | 204f313d5fbdb81d1f7cc568341a1170ddd2b4cf | f2b09f96165166c0e867e3f5f3dcd092dbac1c1b | refs/heads/master | 2023-02-02T03:25:56.603172 | 2020-12-11T11:28:42 | 2020-12-11T11:28:42 | 320,539,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pc-campero2/campero_ws/devel/include".split(';') if "/home/pc-campero2/campero_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "robotnik_msgs;roscpp;sensor_msgs;geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "campero_pad"
PROJECT_SPACE_DIR = "/home/pc-campero2/campero_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"nachocz@gmail.com"
] | nachocz@gmail.com |
5f4b5bcbabc535fe3049ed7f7871e3f932a445bf | a54f78f026c937b5a8a31180024496748169db91 | /nibabel/cmdline/convert.py | c0bc8f212eb0bea113f9dc2e612a04df9b450b1b | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"PDDL-1.0"
] | permissive | nipy/nibabel | 7017e29ee9e3e93d1085d9032c32f6d922b0e43d | 8fea2a8e50aaf4d8b0d4bfff7a21b132914120ee | refs/heads/master | 2023-08-22T07:12:46.167323 | 2023-08-06T23:46:30 | 2023-08-06T23:46:30 | 791,352 | 544 | 239 | NOASSERTION | 2023-09-08T19:10:32 | 2010-07-22T16:28:30 | Python | UTF-8 | Python | false | false | 2,233 | py | #!python
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
Convert neuroimaging file to new parameters
"""
import argparse
import warnings
from pathlib import Path
import nibabel as nib
def _get_parser():
"""Return command-line argument parser."""
p = argparse.ArgumentParser(description=__doc__)
p.add_argument('infile', help='Neuroimaging volume to convert')
p.add_argument('outfile', help='Name of output file')
p.add_argument(
'--out-dtype', action='store', help='On-disk data type; valid argument to numpy.dtype()'
)
p.add_argument(
'--image-type',
action='store',
help='Name of NiBabel image class to create, e.g. Nifti1Image. '
'If specified, will be used prior to setting dtype. If unspecified, '
'a new image like `infile` will be created and converted to a type '
'matching the extension of `outfile`.',
)
p.add_argument(
'-f',
'--force',
action='store_true',
help='Overwrite output file if it exists, and ignore warnings if possible',
)
p.add_argument('-V', '--version', action='version', version=f'{p.prog} {nib.__version__}')
return p
def main(args=None):
"""Main program function."""
parser = _get_parser()
opts = parser.parse_args(args)
orig = nib.load(opts.infile)
if not opts.force and Path(opts.outfile).exists():
raise FileExistsError(f'Output file exists: {opts.outfile}')
if opts.image_type:
klass = getattr(nib, opts.image_type)
else:
klass = orig.__class__
out_img = klass.from_image(orig)
if opts.out_dtype:
try:
out_img.set_data_dtype(opts.out_dtype)
except Exception as e:
if opts.force:
warnings.warn(f'Ignoring error: {e!r}')
else:
raise
nib.save(out_img, opts.outfile)
| [
"markiewicz@stanford.edu"
] | markiewicz@stanford.edu |
38d18892148e084929ca4f3658094b983819c601 | 2581fbdc72887143376a8f9d8f0da0f1508b9cdf | /Flask/08-Social-Blog-Project/11-Blog-Posts-Views/puppycompanyblog/users/picture_handler.py | ecac9e972d79f099fdad038fbc6ed8fac83e338a | [
"Apache-2.0"
] | permissive | Sandy1811/python-for-all | 6e8a554a336b6244af127c7bcd51d36018b047d9 | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | refs/heads/master | 2022-05-16T02:36:47.676560 | 2019-08-16T08:35:42 | 2019-08-16T08:35:42 | 198,479,841 | 1 | 0 | Apache-2.0 | 2022-03-11T23:56:32 | 2019-07-23T17:39:38 | Jupyter Notebook | UTF-8 | Python | false | false | 611 | py | import os
# pip install pillow
from PIL import Image
from flask import url_for, current_app
def add_profile_pic(pic_upload,username):
filename = pic_upload.filename
# Grab extension type .jpg or .png
ext_type = filename.split('.')[-1]
storage_filename = str(username) + '.' +ext_type
filepath = os.path.join(current_app.root_path, 'static\profile_pics', storage_filename)
# Play Around with this size.
output_size = (200, 200)
# Open the picture and save it
pic = Image.open(pic_upload)
pic.thumbnail(output_size)
pic.save(filepath)
return storage_filename
| [
"sndp1811@gmail.com"
] | sndp1811@gmail.com |
04f8120a4657332ab632b812bfbf975295baebcb | a867b1c9da10a93136550c767c45e0d8c98f5675 | /AZ_GFG_DP_EditDistance.py | 4ed89d71dbcfd5e1a552f7b0b1dc754bda561f19 | [] | no_license | Omkar02/FAANG | f747aacc938bf747129b8ff35b6648fb265d95b6 | ee9b245aa83ea58aa67954ab96442561dbe68d06 | refs/heads/master | 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='Dynamic-Programing', Difficult='Medium')
cnt = [0]
def editDist(stringOne, stringTwo, n, m, cache):
cnt[0] += 1
current = (n, m)
if current in cache:
return cache[current]
if not n or not m:
return 0
elif stringOne[n - 1] == stringTwo[m - 1]:
cache[current] = editDist(stringOne, stringTwo, n - 1, m - 1, cache)
else:
cache[current] = 1 + min(editDist(stringOne, stringTwo, n, m - 1, cache), # Insert
editDist(stringOne, stringTwo, n - 1, m, cache), # Remove
editDist(stringOne, stringTwo, n - 1, m - 1, cache)) # Replace
return cache[current]
stringOne = "geek"
stringTwo = "gesek"
print(editDist(stringOne, stringTwo, len(stringOne), len(stringTwo), {}))
print(cnt)
| [
"omkarjoshi4031@live.com"
] | omkarjoshi4031@live.com |
46ca3e2a6be53b2c21d526a65d215a4c498382a9 | 15a7af91ff82773a4eb9818f0dd6fafa5bbf9ff6 | /15_examples_os_file/os_write.py | ff2507f2414ee308329d4cc83c4c2b42c4b085e2 | [] | no_license | hooj0/python-examples | 6d13579d2a7e6424bdc7f7be061d72e27ab8cddd | 1fe2960a710dec60ab640370eee0d60c8222523e | refs/heads/master | 2022-07-03T09:31:36.586102 | 2022-06-14T07:59:07 | 2022-06-14T07:59:07 | 109,703,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | #!/usr/bin/env python3
# encoding: utf-8
# @author: hoojo
# @email: hoojo_@126.com
# @github: https://github.com/hooj0
# @create date: 2018-04-01 17:42:04
# @copyright by hoojo@2018
# @changelog Added python3 `os file -> write` example
import os
'''
概述
os.write() 方法用于写入字符串到文件描述符 fd 中. 返回实际写入的字符串长度。
在Unix中有效。
语法
write()方法语法格式如下:
os.write(fd, str)
参数
fd -- 文件描述符。
str -- 写入的字符串。
返回值
该方法返回写入的实际位数
'''
# 打开文件
fd = os.open("/tmp/foo.txt", os.O_RDWR|os.O_CREAT)
# 写入字符串
str = "new content"
ret = os.write(fd, str)
# 输入返回值
print("写入的位数为: ")
print(ret)
print("写入成功")
# 关闭文件
os.close(fd)
print("关闭文件成功!!") | [
"hoojo@qq.com"
] | hoojo@qq.com |
ae08fb15a0b7691cf92e74eed6d5cfba40ffad58 | 6ff85b80c6fe1b3ad5416a304b93551a5e80de10 | /Python/Algorithm/ExitChar.py | ce23c21ad0db983eebb09e241329225d8636f1e0 | [
"MIT"
] | permissive | maniero/SOpt | c600cc2333e0a47ce013be3516bbb8080502ff2a | 5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3 | refs/heads/master | 2023-08-10T16:48:46.058739 | 2023-08-10T13:42:17 | 2023-08-10T13:42:17 | 78,631,930 | 1,002 | 136 | MIT | 2023-01-28T12:10:01 | 2017-01-11T11:19:24 | C# | UTF-8 | Python | false | false | 257 | py | while True:
entrada = input("Insira um número: ")
if entrada == ' ':
break
try:
num = int(entrada)
print(num)
except ValueError:
print('Dado inválido')
print('fim')
#https://pt.stackoverflow.com/q/462114/101
| [
"noreply@github.com"
] | maniero.noreply@github.com |
d9d8ef34a7409dbe76b838decc290acf0fde701e | 4e5b3985ea4425c2895f638b0dee4c5f64882858 | /clients/kratos/python/test/test_login_flow_method.py | 6232c8459e9cbd12a1f6d0f0da245935e07f993f | [
"Apache-2.0"
] | permissive | ms42Q/sdk | f7d96399c3a9006de0bba5f679814c94c6a9f8a8 | 398ebb59b0ab5da762f2a94efac8e94f5313a851 | refs/heads/master | 2023-03-25T18:42:20.439717 | 2021-03-17T13:13:06 | 2021-03-17T13:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,903 | py | # coding: utf-8
"""
Ory Kratos
Welcome to the ORY Kratos HTTP API documentation! # noqa: E501
The version of the OpenAPI document: v0.5.0-alpha.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ory_kratos_client
from ory_kratos_client.models.login_flow_method import LoginFlowMethod # noqa: E501
from ory_kratos_client.rest import ApiException
class TestLoginFlowMethod(unittest.TestCase):
"""LoginFlowMethod unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test LoginFlowMethod
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = ory_kratos_client.models.login_flow_method.LoginFlowMethod() # noqa: E501
if include_optional :
return LoginFlowMethod(
config = ory_kratos_client.models.login_flow_method_config.loginFlowMethodConfig(
action = '0',
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
method = '0',
providers = [
ory_kratos_client.models.form_field.formField(
disabled = True,
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
], ),
method = '0'
)
else :
return LoginFlowMethod(
config = ory_kratos_client.models.login_flow_method_config.loginFlowMethodConfig(
action = '0',
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
messages = [
ory_kratos_client.models.message.Message(
context = ory_kratos_client.models.context.context(),
id = 56,
text = '0',
type = '0', )
],
method = '0',
providers = [
ory_kratos_client.models.form_field.formField(
disabled = True,
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
], ),
method = '0',
)
def testLoginFlowMethod(self):
"""Test LoginFlowMethod"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
4697ae37ae796a2ee1eca90e5c9d9e07086bbdef | fa5546e2d55ecbdd269e4a8a338dd34c37627961 | /run_sim/analysis_sim/angular-res.py | 5b46345b7a60e41a66a32b4de02183b2168723ba | [] | no_license | zdgriffith/ShowerLLH_scripts | 7f1713b470027259cfefc855597a0d46709e4c25 | 4ba27d416385a82ef1920763dc20a1d6dc81309e | refs/heads/master | 2021-01-19T07:31:50.072658 | 2016-05-03T16:47:29 | 2016-05-03T16:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | #!/usr/bin/env python
#=========================================================================
# File Name : angular-res.py
# Description :
# Creation Date : 04-26-2016
# Last Modified : Tue 26 Apr 2016 03:48:09 PM CDT
# Created By : James Bourbeau
#=========================================================================
import sys
import numpy as np
import matplotlib.pyplot as plt
import argparse
import myGlobals as my
import simFunctions_IT as simFunctions
from usefulFunctions import checkdir
import colormaps as cmaps
if __name__ == "__main__":
# Global variables setup for path names
my.setupShowerLLH(verbose=False)
p = argparse.ArgumentParser(
description='Builds binned histograms for use with ShowerLLH')
p.add_argument('-c', '--config', dest='config',
default='IT73',
choices=['IT73','IT81'],
help='Detector configuration')
p.add_argument('-o', '--outFile', dest='outFile',
help='Output filename')
p.add_argument('-b', '--bintype', dest='bintype',
default='standard',
choices=['standard','nozenith','logdist'],
help='Option for a variety of preset bin values')
args = p.parse_args()
datafile = my.llh_data+'/{}_sim/SimPlot_{}.npy'.format(args.config,args.bintype)
data = (np.load(datafile)).item()
print(data.keys())
zenithMC = data['MC_zenith']
zenithLLH = data['zenith']
print('zenithMC = {}'.format(zenithMC))
print('zenithLLH = {}'.format(np.cos(zenithLLH)))
# Zenith Bins in radians (made with equal solid angle bins)
bins = np.linspace(1, np.cos(40*np.pi/180.), 4)
bins = np.append(np.arccos(bins), np.pi/2)
print('bins = {}'.format(bins))
H, xedges, yedges = np.histogram2d(zenithMC, zenithLLH, bins=bins)
print('H = {}'.format(H))
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
print('Hmasked = {}'.format(Hmasked))
# Plot 2D histogram using pcolor
fig2 = plt.figure()
plt.pcolormesh(bins,bins,Hmasked)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.ax.set_ylabel('Counts')
# fig, ax = plt.subplots(1,1)
# # plt.scatter(zenithMC,zenithLLH)
# plt.hist2d(zenithMC,zenithLLH, bins=40)
# colormap = cmaps.viridis
# plt.colorbar(cmap = colormap)
# # colormap = cmaps.plasma
# # colormap = cmap_discretize(plt.cm.jet,bins)
# # colormap = cmaps.viridis
# # cb.set_label("Foo", labelpad=-1)
# tPars = {'fontsize':16}
# plt.title('Zenith comparison',**tPars)
# ax.set_xlabel(r'MC Zenith $[^{\circ}]$', **tPars)
# ax.set_ylabel(r'LLH Zenith $[^{\circ}]$',**tPars)
# # ax.set_xlim(-650,650)
# plt.show()
# # plt.legend()
# # outfile = '/home/jbourbeau/public_html/figures/snowheights/{}.png'.format(opts['outfile'])
# # checkdir(outfile)
# # plt.savefig(outfile, dpi=300, bbox_inches='tight')
| [
"jrbourbeau@gmail.com"
] | jrbourbeau@gmail.com |
7b1648d71d5e64b480de8d7e481dd76207f65e22 | f7a10bbf3382a31248767e591ac3fed5f67ca565 | /pretix_twilio/urls.py | 9ae10ea7bb7a6122488c8c8ee36e19fc785bb133 | [
"Apache-2.0"
] | permissive | rixx/pretix-twilio | 1abc8e7c431e57b3f7c145f2fdf1cbbb2a6b75e2 | 02948b19f653de7f83c8cfe96f153fbc42506284 | refs/heads/master | 2023-02-23T08:51:26.448968 | 2021-02-01T15:04:25 | 2021-02-01T15:04:25 | 334,114,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(
r"^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/settings/twilio$",
views.TwilioSettings.as_view(),
name="settings",
),
]
| [
"r@rixx.de"
] | r@rixx.de |
b6c4e6243f7aed9aeb62bb560838ff5c8daa92fe | 94b9589d8eb357f784f425051ffb10aa6d2104fa | /lib/doconce/__init__.p.py | cb305805b347a221aa72ac2fccf05f1fe1447c5c | [
"BSD-3-Clause"
] | permissive | sjsrey/doconce | 29607366756a3f48568a128a88c9bb5d65dc3d9d | 7bd32f8b0c99ad6f1173df0c8e058ea1bd400e28 | refs/heads/master | 2021-01-15T17:55:48.436706 | 2015-08-25T09:30:19 | 2015-08-25T09:30:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | '''
# #include "docstrings/docstring.dst.txt"
'''
__version__ = '1.0.3'
version = __version__
__author__ = 'Hans Petter Langtangen', 'Johannes H. Ring'
author = __author__
__acknowledgments__ = ''
from doconce import doconce_format, DocOnceSyntaxError
| [
"hpl@simula.no"
] | hpl@simula.no |
edeb32b3550b8a10a927191fba172c26c0f4753b | 8d5c9369b0fb398c5a6078f6cac43ba8d67202fa | /bscan/models.py | 47a772619a2637e7c450ea377eb1b3ed73c8942e | [
"MIT"
] | permissive | raystyle/bscan | 45191c2c0d26fe450c5d95567b83d47dfcb4c692 | 1edf0c0e738153a294d5cdc1b69d8f167152d5a2 | refs/heads/master | 2020-04-25T03:15:37.186913 | 2019-02-09T22:23:44 | 2019-02-09T22:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,932 | py | """Models for use in `bscan` operations."""
from collections import (
namedtuple)
from typing import (
List)
from bscan.io_files import (
file_exists)
from bscan.runtime import (
get_db_value)
from bscan.dir_structure import (
get_scan_file)
ParsedService = namedtuple(
'ParsedService',
['name', 'port'])
"""A class representing a service parsed from unicornscan/nmap outout."""
_DetectedService = namedtuple(
'_DetectedService',
['name', 'target', 'ports', 'scans', 'recommendations'])
class DetectedService(_DetectedService):
"""A class for encapsulating a service detected in a scan."""
def build_scans(self) -> List[str]:
"""Build the scans to be run on this target."""
built_scans = []
for scan, cmd in self.scans.items():
built_scans.extend(self._fill_template(scan, cmd))
return built_scans
def build_recommendations(self) -> List[str]:
"""Build the recommended commands to be run on this target."""
built_recs = []
for i, cmd in enumerate(self.recommendations):
built_recs.extend(self._fill_template('rec' + str(i), cmd))
return built_recs
def port_str(self) -> str:
"""Build a string representing the ports open for this service."""
return ','.join([str(p) for p in self.ports])
def _fill_template(self, scan_name, cmd) -> List[str]:
"""Replace template parameters with values."""
cmd = (cmd.replace('<target>', self.target)
.replace('<wordlist>', get_db_value('web-word-list'))
.replace('<userlist>', get_db_value('brute-user-list'))
.replace('<passlist>', get_db_value('brute-pass-list')))
if '<ports>' in cmd:
fout = get_scan_file(
self.target,
self.name + '.' + '.'.join([str(p) for p in self.ports]) +
'.' + scan_name)
return [cmd.replace('<ports>', self.port_str())
.replace('<fout>', fout)]
elif '<port>' in cmd:
cmds = []
for port in self.ports:
fout = get_scan_file(
self.target,
self.name + '.' + str(port) + '.' + scan_name)
cmds.append(
cmd.replace('<port>', str(port)).replace('<fout>', fout))
return cmds
else:
fout = get_scan_file(self.target, self.name + '.' + scan_name)
# handling edge-case where a qs-spawned non-port scan could be
# overwritten by a ts-spawned non-port scan of the same service
i = 0
while file_exists(fout):
fout = get_scan_file(
self.target,
self.name + '.' + str(i) + '.' + scan_name)
i += 1
cmd = cmd.replace('<fout>', fout)
return [cmd]
| [
"welch18@vt.edu"
] | welch18@vt.edu |
f1f81b63ba6f1487db1daac51ce24b4bea37477e | b4407a7ec61b55cc03cdc65e83d68e162f602cb4 | /2020/day07_bags.py | 9b4eca3fcd4f3695c1df41b8b5ca8198ad2a86d0 | [] | no_license | andy1li/adventofcode | 4f8a3f5776d1f73cbed65f9824b69da2079a3e3b | fa04625d3b59adfbfbc9d1e36c5b333553fda2f6 | refs/heads/master | 2022-12-23T01:19:56.695103 | 2022-12-14T20:48:59 | 2022-12-14T20:50:11 | 225,248,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | # https://adventofcode.com/2020/day/7
from collections import defaultdict
import re
def parse(rules):
G = defaultdict(set)
for rule in rules:
colors = re.findall(r'(\d )?([a-z]+ [a-z]+) bag', rule)
G[colors[0][1]] |= set(colors[1:])
G['no other'] # put 'no other' in G
return G
def fst_star(G):
def contain_gold(color):
return color == 'shiny gold' or any(
contain_gold(x) for _, x in G[color]
)
return sum(map(contain_gold, G)) - 1
def snd_star(G):
def count(color):
if G[color] == {('', 'no other')}: return 1
return 1 + sum(
int(n) * count(x) for n, x in G[color]
)
return count('shiny gold') - 1
TEST1 = '''\
light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.'''.splitlines()
TEST2 = '''\
shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.'''.splitlines()
if __name__ == '__main__':
assert fst_star(parse(TEST1)) == 4
assert snd_star(parse(TEST1)) == 32
assert snd_star(parse(TEST2)) == 126
G = parse(open('data/day07.in'))
print(fst_star(G))
print(snd_star(G)) | [
"li.chenxing@gmail.com"
] | li.chenxing@gmail.com |
a4b8c3d29038e9d1f5f44a739ddb3ce41c146c65 | 0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce | /Python/LeetCode/ByteDance/215_find_kth_largest.py | 12a1bb08aaa804c798e2c67fce1404231ea277f4 | [] | no_license | shouliang/Development | c56fcc69e658393c138b63b507b96c48232128d5 | b7e3b02c50d54515e584cb18dff83109224245d0 | refs/heads/master | 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | '''
数组中的第K个最大元素
215. Kth Largest Element in an Array:https://leetcode.com/problems/kth-largest-element-in-an-array/
解释:
在未排序的数组中找到第 k 个最大的元素。请注意,你需要找的是数组排序后的第 k 个最大的元素,而不是第 k 个不同的元素。
示例 1:
输入: [3,2,1,5,6,4] 和 k = 2
输出: 5
示例 2:
输入: [3,2,3,1,2,4,5,5,6] 和 k = 4
输出: 4
说明:
你可以假设 k 总是有效的,且 1 ≤ k ≤ 数组的长度。
'''
import heapq
class Solution(object):
def findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
top_nums = []
for i in range(len(nums)): # 遍历list
if len(top_nums) < k: # 数量未到k个,直接插入小顶堆,Python会自动维护
heapq.heappush(top_nums, nums[i])
elif top_nums[0] < nums[i]: # 小顶堆已满,若栈顶小于nums[i],则更新小顶堆
heapq.heappushpop(top_nums, nums[i])
return top_nums[0] # 最后返回小顶堆堆顶,即为第k个大小的元素
# 利用快排思想:题目中要求的是第k大的元素,故选择一个pivot,让其左边比其大,右边比其小
def _findKthLargest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
low, high = 0, len(nums)-1
while low <= high:
pivot = self.partition(nums, low, high)
if pivot == k - 1:
return nums[pivot]
elif pivot > k - 1:
high = pivot - 1
else:
low = pivot + 1
def partition(self, nums, low, high):
pivotValue = nums[high]
i = low
for j in range(low, high):
if nums[j] > pivotValue: # 此处有变动,pivot左边比其要大,右边比其要小,因为题目要求是求第k大的元素
self.swap(nums, i, j)
i += 1
self.swap(nums, i, high)
return i
def swap(self, nums, i, j):
nums[i], nums[j] = nums[j], nums[i]
s = Solution()
nums, k = [3, 2, 1, 5, 6, 4], 2
kth = s.findKthLargest(nums, k)
print(kth)
nums, k = [3, 2, 3, 1, 2, 4, 5, 5, 6], 4
kth = s.findKthLargest(nums, k)
print(kth)
nums, k = [3, 2, 3, 1, 2, 4, 5, 5, 6], 4
kth = s._findKthLargest(nums, k)
print(kth)
| [
"git@git.dxl.cc:node/hunqing.git"
] | git@git.dxl.cc:node/hunqing.git |
02615e370b0bfc1f04ad03ca66aa75ed9099e496 | 286dcbb3c75370c2fb06f1d91bb0c98f260b4002 | /elm_architect/main.py | 18538592545b26a1ebff10f2632152491448d283 | [
"BSD-3-Clause"
] | permissive | andrewgryan/bokeh-playground | 6e4aea21bc50d0a8c54b30eeb9ccfee0a92df2e1 | aeab70627a5ccd7f210c354098d30bdf92bb553f | refs/heads/master | 2021-07-04T19:53:25.595711 | 2020-07-31T13:58:22 | 2020-07-31T13:58:22 | 136,485,392 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | import bokeh.plotting
import bokeh.models
import bokeh.layouts
from observe import Observable
from redux import Store
import actions
class View(Observable):
def __init__(self):
self.layout = bokeh.layouts.column()
super().__init__()
def render(self, state):
counter = state.get("counter", 0)
if len(self.rows) > counter:
self.layout.children.pop(-1)
elif len(self.rows) < counter:
index = len(self.rows)
self.layout.children.append(self.row(index))
@property
def rows(self):
return self.layout.children
def row(self, index):
button = bokeh.models.Button(label="Button: {}".format(str(index)))
button.on_click(self.on_click(index))
return bokeh.layouts.row(button)
def on_click(self, index):
def callback():
self.notify(actions.set_index(index))
return callback
class AddRemove(Observable):
def __init__(self):
buttons = {
"add": bokeh.models.Button(label="Add"),
"remove": bokeh.models.Button(label="Remove")
}
buttons["add"].on_click(self.on_add)
buttons["remove"].on_click(self.on_remove)
self.layout = bokeh.layouts.row(buttons["add"], buttons["remove"])
super().__init__()
def on_add(self):
self.notify(actions.add_row())
def on_remove(self):
self.notify(actions.remove_row())
class Text(object):
def __init__(self):
self.div = bokeh.models.Div()
self.layout = bokeh.layouts.column(self.div)
def render(self, state):
print("Text.render({})".format(state))
texts = []
for key in ["counter", "index"]:
if key in state:
value = str(state[key])
texts.append("{}: {}".format(key, value))
self.div.text = " ".join(texts)
def main():
store = Store(reducer)
undo_button = bokeh.models.Button(label="Undo")
undo_button.on_click(lambda: store.dispatch(actions.undo()))
redo_button = bokeh.models.Button(label="Redo")
redo_button.on_click(lambda: store.dispatch(actions.redo()))
add_remove = AddRemove()
add_remove.subscribe(store.dispatch)
text = Text()
view = View()
view.subscribe(store.dispatch)
for method in [
text.render,
view.render]:
store.subscribe(method)
column = bokeh.layouts.column(
bokeh.layouts.row(undo_button, redo_button),
bokeh.layouts.row(text.layout),
bokeh.layouts.row(view.layout),
add_remove.layout)
document = bokeh.plotting.curdoc()
document.add_root(column)
def history(reducer):
    """Reducer decorator to make time-travel possible
    .. note:: App must be able to re-render initial state
    past, present, future = [], s0, []
    <action> past, present, future = [s0], s1, []
    <action> past, present, future = [s0, s1], s2, []
    <undo> past, present, future = [s0], s1, [s2]
    <redo> past, present, future = [s0, s1], s2, []
    """
    # Closure state shared across calls; dict(present) takes shallow
    # snapshots so later in-place edits cannot rewrite recorded history.
    past, present, future = [], {}, []
    def wrapped(state, action):
        nonlocal past, present, future
        kind = action["kind"]
        if kind == actions.UNDO:
            # Step back one state, stashing the current one for redo.
            if len(past) > 0:
                future.append(dict(present))
                present = past.pop()
                return present
            else:
                # Nothing to undo: state passes through unchanged.
                return state
        elif kind == actions.REDO:
            # Step forward again, if anything was previously undone.
            if len(future) > 0:
                past.append(dict(present))
                present = future.pop()
                return present
            else:
                return state
        else:
            # A normal action invalidates the redo branch entirely.
            future = []
            past.append(dict(present))
            present = reducer(state, action)
            return present
    return wrapped
@history
def reducer(state, action):
    """Pure state transition: row-counter changes and index selection."""
    kind = action["kind"]
    next_state = dict(state)
    if "ROW" in kind:
        count = next_state.get("counter", 0)
        if kind == actions.ADD_ROW:
            count = count + 1
        elif kind == actions.REMOVE_ROW and count >= 1:
            count = count - 1
        next_state["counter"] = count
    elif kind == actions.SET_INDEX:
        next_state["index"] = action["payload"]["index"]
    return next_state
# The bokeh server executes this module with a generated __name__ of the
# form "bk_script_...", so this replaces the usual __main__ guard.
if __name__.startswith("bk"):
    main()
| [
"andrew.ryan@metoffice.gov.uk"
] | andrew.ryan@metoffice.gov.uk |
8a9761c62f9b83b30a715220682f3ba589974ff0 | c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34 | /source/Clarification/Backtracking/131.分割回文串.py | d841d400f8f0eefd68508d27b7c1b3417fdef254 | [
"MIT"
] | permissive | zhangwang0537/LeetCode-Notebook | 73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1 | 1dbd18114ed688ddeaa3ee83181d373dcc1429e5 | refs/heads/master | 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 | MIT | 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null | UTF-8 | Python | false | false | 632 | py | # 给定一个字符串 s,将 s 分割成一些子串,使每个子串都是回文串。
#
# 返回 s 所有可能的分割方案。
#
# 示例:
#
# 输入: "aab"
# 输出:
# [
# ["aa","b"],
# ["a","a","b"]
# ]
# 方法一:回溯
class Solution:
    def partition(self, s: str) -> List[List[str]]:
        """Return every way to split *s* into palindromic substrings.

        Depth-first backtracking: try each palindromic prefix, recurse on
        the remainder, and record the accumulated cuts when the string is
        exhausted.
        """
        partitions = []

        def backtrack(remaining, acc):
            if not remaining:
                partitions.append(acc)
                return
            for cut in range(1, len(remaining) + 1):
                prefix = remaining[:cut]
                if prefix == prefix[::-1]:
                    backtrack(remaining[cut:], acc + [prefix])

        backtrack(s, [])
        return partitions
# 方法二:动态规划
| [
"mzm@mail.dlut.edu.cn"
] | mzm@mail.dlut.edu.cn |
b598cf28af263db9d8d3048e6612b97921b96f8c | 31c7a43bab54a6b2dd4e76f72b8afbc96d876b4f | /custom_components/mrbond_airer/cover.py | 7397c3854d46c3ac40e4d8dec85e1f734e54d305 | [] | no_license | zhengwenxiao/HAExtra | 39c486c05e5db90b6d841fcc35d7a1af64358ffc | 2cd21b166e92f43988459dd1448002ce7a9b52e2 | refs/heads/master | 2023-01-19T07:55:45.456000 | 2020-11-15T11:39:31 | 2020-11-15T11:39:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,780 | py | """Support for MrBond Airer."""
import logging
from . import MiioEntity, DOMAIN
from homeassistant.components.cover import CoverEntity, ATTR_POSITION
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
AIRER_DURATION = 10
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the MrBond airer cover entity from discovery info."""
    async_add_entities([MrBondAirer(hass, discovery_info, hass.data[DOMAIN])], True)
class MrBondAirer(MiioEntity, CoverEntity, RestoreEntity):
    """Representation of a MrBond airer as a Home Assistant cover.

    Device status keys (string values, as reported by the device):
      * ``motor``: '0' stopped, '1' opening, '2' closing.
      * ``airer_location``: '1' up/open, '2' down/closed, '0' in between.
    """

    def __init__(self, hass, name, device):
        """Initialize the airer cover entity."""
        super().__init__(hass, name, device, True)
        # Assume fully open until the previous state is restored.
        self._device.status['airer_location'] = '1'

    async def async_added_to_hass(self):
        """Restore the last known airer location when the entity is added."""
        await super().async_added_to_hass()
        last_state = await self.async_get_last_state()
        _LOGGER.debug("async_added_to_hass: %s", last_state)
        if last_state:
            location = last_state.attributes.get('airer_location')
            if location is not None:
                self._device.status['airer_location'] = location
                _LOGGER.debug("Restore location: %s", location)

    @property
    def icon(self):
        """Return the icon for the entity."""
        return 'mdi:hanger'

    @property
    def current_cover_position(self):
        """Return position: 0 closed, 50 mid-travel, 100 fully open."""
        location = self._device.status.get('airer_location')
        return 0 if location == '2' else (50 if location == '0' else 100)

    @property
    def is_opening(self):
        """Return True while the motor is raising the airer."""
        return self._device.status.get('motor') == '1'

    @property
    def is_closing(self):
        """Return True while the motor is lowering the airer."""
        return self._device.status.get('motor') == '2'

    @property
    def is_closed(self):
        """Return True if the airer is fully lowered."""
        return self._device.status.get('airer_location') == '2'

    def open_cover(self, **kwargs):
        """Raise the airer."""
        _LOGGER.debug("open_cover: %s", kwargs)
        if self._device.control('set_motor', 1):
            self._device.status['airer_location'] = '1'
            _LOGGER.debug("open_cover success: %s", self._device.status)

    def close_cover(self, **kwargs):
        """Lower the airer."""
        _LOGGER.debug("close_cover: %s", kwargs)
        if self._device.control('set_motor', 2):
            self._device.status['airer_location'] = '2'

    def stop_cover(self, **kwargs):
        """Stop the airer."""
        _LOGGER.debug("stop_cover: %s", kwargs)
        self.pause_cover()

    def pause_cover(self, *_):
        """Stop the motor and record the mid-travel position.

        Extra positional args are accepted (and ignored) because
        ``async_call_later`` invokes its action with the fire time.
        """
        if self._device.control('set_motor', 0):
            # Bug fix: these lines used '==' (a no-op comparison) where the
            # intent is clearly to record the stopped/mid-travel status.
            self._device.status['motor'] = '0'
            self._device.status['airer_location'] = '0'

    def set_cover_position(self, **kwargs):
        """Move toward a position; mid positions run for half a travel cycle."""
        _LOGGER.debug("set_cover_position: %s", kwargs)
        position = kwargs.get(ATTR_POSITION)
        if position <= 0:
            self.close_cover()
        elif position >= 100:
            self.open_cover()
        else:
            location = self._device.status.get('airer_location')
            if location == '1':
                # Fully open: run downward, then pause at half travel time.
                self.close_cover()
                self._device.status['motor'] = '2'  # bug fix: was '=='
            elif location == '2':
                # Fully closed: run upward, then pause at half travel time.
                self.open_cover()
                self._device.status['motor'] = '1'  # bug fix: was '=='
            else:
                # Already mid-travel: no defined target, do nothing.
                return
            async_call_later(self._hass, AIRER_DURATION / 2, self.pause_cover)
| [
"Yonsm@qq.com"
] | Yonsm@qq.com |
709dd4b6b71ba31375c4507cb10ca26485c5d4b1 | 30ab9750e6ca334941934d1727c85ad59e6b9c8a | /zentral/contrib/nagios/apps.py | 01b4ebb306e996d8eeecebd091d7caaee7e16d81 | [
"Apache-2.0"
] | permissive | ankurvaishley/zentral | 57e7961db65278a0e614975e484927f0391eeadd | a54769f18305c3fc71bae678ed823524aaa8bb06 | refs/heads/main | 2023-05-31T02:56:40.309854 | 2021-07-01T07:51:31 | 2021-07-01T14:15:34 | 382,346,360 | 1 | 0 | Apache-2.0 | 2021-07-02T12:55:47 | 2021-07-02T12:55:47 | null | UTF-8 | Python | false | false | 182 | py | from zentral.utils.apps import ZentralAppConfig
class ZentralNagiosAppConfig(ZentralAppConfig):
    """Django AppConfig registering the Zentral Nagios contrib app."""
    name = "zentral.contrib.nagios"
    verbose_name = "Zentral Nagios contrib app"
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
555be1a2570cfdb9f48f526e367dbee1e7e17642 | 3eb55e98ca635cde6c41e895844f6c01b06a91ac | /if_else_2.py | fd625f3734f119929fc686ef956ba52937042625 | [] | no_license | tatikondarahul2001/py | defefacdb726aeb3e2d5515cb8a8dd14e3b7f18c | 30c459312befc2426f403ea4062233dbd0b4fa0a | refs/heads/main | 2023-03-12T07:41:17.550136 | 2021-02-24T14:43:35 | 2021-02-24T14:43:35 | 323,222,577 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | n = int(input("enter the annual income : "))
# Income at or below the 150000 threshold qualifies for the scholarship.
message = (
    "the person is eligible for scholarship"
    if n <= 150000
    else "the person is not eligible for scholarship"
)
print(message)
"noreply@github.com"
] | tatikondarahul2001.noreply@github.com |
44d735b35e4ea027fb4760cd6b8b6201d3419cf9 | e987cd566edc75997f9b02377514d4f3a0dba12c | /sys/src/Python/diffTools/iwdiffLib.py | 60f54a8287436d221f81f04de8d53fb7552002d2 | [] | no_license | 7u83/maxdb-buildtools | f942adff2cd55d0a046b6ef3e18f6645b011a26e | ce9a56943f6195d6755e983035aa96cbe95e6cb2 | refs/heads/master | 2020-05-04T18:23:30.849371 | 2015-02-15T19:25:49 | 2015-02-15T19:25:49 | 30,428,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,900 | py |
#
# ========== licence begin GPL
# Copyright (C) 2001 SAP AG
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ========== licence end
#
import string
import re
import os
import fileutil
from vmake import yutil
import wdiffLib
class IWDiff (wdiffLib.WDiff):
    """Two-way diff between a local (own) file and its upstream version."""
    def __init__ (self, options, source):
        wdiffLib.WDiff.__init__ (self, options)
        own, upper = yutil.getDiffPair (source)
        if own is None:
            # NOTE(review): Python 2 string exception; callers apparently
            # catch the literal 'NotLocal', so it is left unchanged.
            raise 'NotLocal', source
        self.addDiff (own, upper)
        # Labels in (upstream, local) order for the diff output.
        self.labels = upper, own
class IWDiff3 (wdiffLib.WDiff3):
    """Three-way diff of a target file: common base vs. local vs. other tree.

    The "other" tree is located via *relPath*; across major releases the
    other file is first run through a project renamer so identifiers
    compare cleanly.
    """
    # (low major, high major, rename data file) releases with a rename map.
    availableRenamings = [
        (7, 8, 'rename7to8.dta'),
    ]
    def __init__ (self, options, relPath, target):
        self.getReleases (relPath)
        mine, self.relativePath = self.getMine (target)
        base = self.getBase (mine)
        other = self.getOther ()
        wdiffLib.WDiff3.__init__ (self, options, base, mine, other)
    def getReleases (self, relPath):
        """Read release info for both trees and choose the base vmake path."""
        mineRelease, mineDat = getRelInfo (os.environ ['OWN'])
        otherRelease, otherDat = getRelInfo (relPath)
        self.labels = ['Base', mineDat, otherDat]
        # Up-merge: the local release is newer than the other tree's.
        isUpMerge = mineRelease > otherRelease
        mineVPath = getVmakePath ()
        otherVPath = self.otherVPath = getVmakePath (relPath)
        # The common base is looked up in the older tree's vmake path.
        if isUpMerge:
            self.baseVPath = mineVPath
        else:
            self.baseVPath = otherVPath
        self.createRenaming (mineRelease, otherRelease)
    def createRenaming (self, mineRelease, otherRelease):
        """Install a renamer when the two major releases have a rename map.

        The map is applied forward or reverse depending on which side is
        the higher release; ``self.renamer`` stays None otherwise.
        """
        mineMajor = mineRelease [0]
        otherMajor = otherRelease [0]
        renamer = None
        for lowMajor, highMajor, dataFile in self.availableRenamings:
            if (lowMajor == otherMajor) and (highMajor == mineMajor):
                renamer = self.createRenamer (dataFile, None)
            elif (lowMajor == mineMajor) and (highMajor == otherMajor):
                renamer = self.createRenamer (dataFile, 'true')
        self.renamer = renamer
    def createRenamer (self, dataFile, reverse):
        """Build a ProjectRenamer from the named rename-map data file."""
        from vmake import renameLib
        fullName = renameLib.findRenameMap (dataFile)
        result = renameLib.ProjectRenamer (fullName, reverse = reverse)
        return result
    def getMine (self, target):
        """Return (local file, path relative to the tree root)."""
        relative = yutil.relativePath (target)
        mine = yutil.findRelativeFile (relative)
        if mine is None:
            # Local file missing: substitute a placeholder so a diff exists.
            mine = self.createEmptyFile (relative, self.labels [1])
        return mine, relative
    def getBase (self, mine):
        """Locate the common ancestor under sys/branchorigin."""
        list = string.split (self.relativePath, os.sep)
        assert list [0] == 'sys'
        changedList = ['branchorigin'] + list [1:]
        relPath = string.join (changedList, os.sep)
        base = yutil.findRelativeFile (relPath, vpath = self.baseVPath)
        if base is None:
            # No branch origin recorded: fall back to a copy of the local file.
            base = self.createEmptyFile (self.relativePath,
                    self.labels [0], mine)
        return base
    def getOther (self):
        """Locate the other tree's file, renamed into this release's terms."""
        otherFile = yutil.findRelativeFile (self.relativePath, vpath = self.otherVPath)
        if otherFile is None:
            return self.createEmptyFile (self.relativePath, self.labels [2])
        if self.renamer:
            base = fileutil.basename (otherFile)
            renamedFile = os.path.join (os.environ ['TMP'], 'renamed.' + base)
            self.renamer.filterFile (otherFile, renamedFile)
            otherFile = renamedFile
        return otherFile
    def createEmptyFile (self, fname, kind, fillFile = None):
        """Create a placeholder file in $TMP, filled from *fillFile* if given."""
        baseName = fileutil.basename (fname)
        fname = os.path.join (os.environ ['TMP'], 'empty.'
                + kind + '.' + baseName)
        stream = open (fname, 'wt')
        if fillFile:
            data = open (fillFile, 'rt').read ()
        else:
            data = '%s: %s does not exist\n' % (kind, baseName)
        stream.write (data)
        stream.close ()
        return fname
    def getMineFile (self):
        """Return the absolute local path, creating it via inew when absent."""
        fullName = os.path.join (os.environ ['OWN'], self.relativePath)
        if os.path.exists (fullName):
            return fullName
        yutil.inew (fullName)
        return fullName
def getRelInfo (dir):
    """Read <dir>/Release.dat; return ((major, minor, ...), raw text)."""
    data = open (os.path.join (dir, 'Release.dat'), 'rt').read ()
    numbers = [int (part) for part in string.split (data, '.')]
    return tuple (numbers), data
def getVmakePath (dir = None):
    """Return VMAKE_PATH as a list of entries.

    Reads the current environment when *dir* is None; otherwise parses
    the iprofile.bat found under *dir*.
    """
    if dir is None:
        pathvar = os.environ ['VMAKE_PATH']
    else:
        pathvar = parseIprofile (dir)
    return string.split (pathvar, ',')
def parseIprofile (dir):
    """Extract the VMAKE_PATH value from <dir>/iprofile.bat.

    Performs a minimal batch-file interpretation: every ``set NAME=value``
    line is recorded, with %VAR% references expanded from values seen so
    far (OWN is pre-seeded with *dir*).  Returns '' when no VMAKE_PATH
    assignment is found.
    """
    lines = open (os.path.join (dir, 'iprofile.bat'), 'rt').readlines ()
    env = {'OWN': dir}
    setRE = re.compile (' *set +([^=]+)=(.*)', re.IGNORECASE)
    varRE = re.compile ('%[^%]+%')
    def expandvar (match, env = env):
        # Unknown variables expand to '' just as cmd.exe would.
        varname = match.group (0) [1:-1]
        if env.has_key (varname):
            return env [varname]
        else:
            return ''
    for line in lines:
        match = setRE.match (line)
        if match:
            varname, value = match.group (1, 2)
            value = string.strip (value)
            value = varRE.sub (expandvar, value)
            if varname == 'VMAKE_PATH':
                return value
            else:
                env [varname] = value
    return ''
| [
"7u83@mail.ru"
] | 7u83@mail.ru |
a654ea1d0fbbc83d6ab5cc63095e9c0667b0cb68 | 7875b7685dde7ad0bbcf9e0540373d1ce5e886b7 | /project_name/conf/settings.py | 97b49557acd3f508bf0071549f107b7a089c2167 | [
"BSD-2-Clause"
] | permissive | bruth/wicked-django-template | ea6b742f4b8bd73cf68d5a86ffe4582f9c747eb0 | 7628d7a3fa50b23c34772a6978c00019e4f6526e | refs/heads/master | 2020-05-17T07:56:17.507674 | 2018-06-21T19:30:02 | 2018-06-21T19:30:02 | 3,102,471 | 2 | 3 | BSD-2-Clause | 2018-06-21T19:30:03 | 2012-01-04T14:28:29 | JavaScript | UTF-8 | Python | false | false | 909 | py | import os
from .global_settings import * # noqa
try:
    from .local_settings import *  # noqa
except ImportError:
    # Per-deployment settings are optional; warn so a missing module is
    # noticed without breaking startup.
    import warnings
    warnings.warn('Local settings have not been found '
                  '({{ project_name }}.conf.local_settings)')
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server.  Since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), they need to be updated to
# reflect this alteration.
if FORCE_SCRIPT_NAME:
    STATIC_URL = os.path.join(FORCE_SCRIPT_NAME, STATIC_URL[1:])
    MEDIA_URL = os.path.join(FORCE_SCRIPT_NAME, MEDIA_URL[1:])
    LOGIN_URL = os.path.join(FORCE_SCRIPT_NAME, LOGIN_URL[1:])
    LOGOUT_URL = os.path.join(FORCE_SCRIPT_NAME, LOGOUT_URL[1:])
    LOGIN_REDIRECT_URL = os.path.join(FORCE_SCRIPT_NAME,
                                      LOGIN_REDIRECT_URL[1:])
| [
"b@devel.io"
] | b@devel.io |
3bcde3d67807a656d564f24828810b2c5be3d078 | 6dc9f1753f0e2ccaef6fb385324ba0602a04042a | /CUHK_CPM/GPS_Project/GPS_main/python/gps/utility/coord_transform.py | 0c4b1a2ebb1b49ef5cb0c4d38ed6eb3b5bd97672 | [] | no_license | SunnyLyz/Deep_Learning | c413abe3ef6510b3492f0a73c9a287b4bf56ec2c | 9fa58688a7daffdded8037b9fa20c571a00f87e0 | refs/heads/master | 2021-06-21T12:12:39.450564 | 2017-07-18T12:20:45 | 2017-07-18T12:20:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,615 | py | import numpy as np
import cv2
# Fixed rigid-body offsets, in millimetres.
robot2world_offset_mm = np.array([0, 0, 630], dtype=float)  # robot base -> world
peg2robot_offset_mm = np.array([105, 0, 0], dtype=float)    # peg centre -> end-effector
# Nine peg keypoints in the peg's own frame (mm): the centre plus the
# four corners of each end face (x = +/-100).
peg_points_mm = np.array([[ 0, 0, 0],
                          [ 100, 30, 30],
                          [ 100, -30, 30],
                          [ 100, 30, -30],
                          [ 100, -30, -30],
                          [-100, 30, 30],
                          [-100, -30, 30],
                          [-100, 30, -30],
                          [-100, -30, -30]], dtype=float)
# Nine brick keypoints, all on the plane z = 50 mm in the brick frame.
brick_points_mm = np.array([[ 0, 0, 50],
                            [ 40, 40, 50],
                            [ 40, -40, 50],
                            [ -40, 40, 50],
                            [ -40, -40, 50],
                            [ 160, 160, 50],
                            [-160, 160, 50],
                            [ 160, -160, 50],
                            [-160, -160, 50]], dtype=float)
# Camera angular field of view in radians (used via tan(fov/2) to derive
# the focal length in the projection helpers below).
fov = 1.0808390005411683
def robot2world_m(robot_coord_mm):
    """Translate a robot-frame point (mm) into world frame, in metres."""
    world_mm = robot_coord_mm + robot2world_offset_mm
    return world_mm / 1000
def world2robot_mm(world_coord_m):
    """Convert a world-frame point in metres to robot frame, in mm."""
    world_mm = world_coord_m * 1000
    return world_mm - robot2world_offset_mm
def get_peg_coord_in_robot_mm(ee_pos_mm, ee_rot_rad):
    """Return the nine peg keypoints (mm, robot frame) for an EE pose."""
    rotation = rot_mat(ee_rot_rad)
    origin = ee_pos_mm + peg2robot_offset_mm
    return rotation.dot(peg_points_mm.T).T + origin
def get_brick_coord_in_world_m(center_pos):
    """Return the nine brick keypoints in world frame, in metres.

    *center_pos* is [x, y, z (m), rx, ry, rz]; the rotation components
    are reversed to match transformation_matrix's (u, v, w) convention.
    """
    position_mm = center_pos[:3] * 1000
    rotation = np.array([center_pos[5], center_pos[4], center_pos[3]])
    brick_to_world = transformation_matrix(np.concatenate((position_mm, rotation)), False)
    homogeneous = np.c_[brick_points_mm, np.ones(brick_points_mm.shape[0])].T
    world_points_mm = brick_to_world.dot(homogeneous)
    return world_points_mm[:3].T / 1000
def rot_mat(rot):
    """Build the 3x3 rotation Rz(rot[0]) @ Ry(rot[1]) @ Rx(rot[2]) as float32."""
    z_ang, y_ang, x_ang = rot
    cx, sx = np.cos(x_ang), np.sin(x_ang)
    cy, sy = np.cos(y_ang), np.sin(y_ang)
    cz, sz = np.cos(z_ang), np.sin(z_ang)
    Rx = np.array([[1, 0, 0],
                   [0, cx, -sx],
                   [0, sx, cx]], dtype=np.float32)
    Ry = np.array([[cy, 0, sy],
                   [0, 1, 0],
                   [-sy, 0, cy]], dtype=np.float32)
    Rz = np.array([[cz, -sz, 0],
                   [sz, cz, 0],
                   [0, 0, 1]], dtype=np.float32)
    return Rz.dot(Ry.dot(Rx))
def transformation_matrix(pose, degree):
    """Build the 4x4 homogeneous transform for pose = [x, y, z, u, v, w].

    The rotation block is rot_mat((u, v, w)); the translation is (x, y, z).
    When *degree* is true the angles are interpreted as degrees.
    """
    position = pose[:3]
    rot = pose[3:]
    if degree:
        # Bug fix: the original used `rot /= 180.0 / np.pi`, which mutates
        # the caller's pose in place (pose[3:] is a numpy view) and raises
        # TypeError for a plain-list pose.  Convert on a fresh array using
        # the same arithmetic so results are unchanged.
        rot = np.array(rot, dtype=np.float64) / (180.0 / np.pi)
    rotMat = rot_mat(rot)
    tfMat = np.eye(4, dtype=np.float32)
    tfMat[:3, :3] = rotMat
    tfMat[:3, -1] = position
    return tfMat
def projectPtsToImg(points, camera_pose, img_size, degree=False):
    """Project Nx3 world points onto the (square) camera image.

    Pinhole model: the focal length is derived from the global ``fov``
    and *img_size*; *camera_pose* is [x, y, z, u, v, w] in world frame.
    Returns an (N, 1, 2) array of pixel coordinates.
    """
    # Focal length in pixels from the angular field of view.
    f = img_size / (np.tan(fov / 2.0) * 2.0)
    cameraMatrix = np.array([
        [f, 0, img_size / 2.0],
        [0, f, img_size / 2.0],
        [0, 0, 1]
    ], dtype=np.float32)
    # World->camera is the inverse of the camera's pose transform.
    Tcw = transformation_matrix(camera_pose, degree)
    Twc = np.linalg.inv(Tcw)
    # Homogeneous world points (4xN) mapped into the camera frame.
    Pxw = np.pad(points.T, ((0, 1), (0, 0)), 'constant', constant_values=1)
    Pxc = Twc.dot(Pxw)[:3]
    scaled_img_points = cameraMatrix.dot(Pxc)
    # Perspective divide by depth.
    img_points = scaled_img_points[:2] / scaled_img_points[2]
    return img_points.T.reshape(points.shape[0], -1, 2)
def get3DPtsFromImg(points, zw, camera_pose, img_size, degree=False):
    """Back-project Nx2 pixel coordinates onto the world plane z = zw.

    Inverse of projectPtsToImg: each pixel defines a ray from the camera
    origin; the ray is intersected with the horizontal plane at height
    *zw* (same units as the camera pose).  Returns an (N, 2) array of
    world x/y coordinates.
    """
    f = img_size / (np.tan(fov / 2.0) * 2.0)
    cameraMatrix = np.array([
        [f, 0, img_size / 2.0],
        [0, f, img_size / 2.0],
        [0, 0, 1]
    ], dtype=np.float32)
    inv_cameraMatrix = np.linalg.inv(cameraMatrix)
    Tcw = transformation_matrix(camera_pose, degree)
    # Homogeneous pixel coordinates -> camera-frame ray directions.
    img_points = np.pad(points.T, ((0, 1), (0, 0)), 'constant', constant_values=1)
    Pxc = inv_cameraMatrix.dot(img_points)
    Pxc = np.pad(Pxc, ((0, 1), (0, 0)), 'constant', constant_values=1)
    # Ray points expressed in the world frame.
    Pxw = Tcw.dot(Pxc)
    camera_origin = camera_pose[:3].reshape(3, 1)
    # Scale each camera->point ray so that it reaches z == zw.
    space_points = (Pxw[:2] - camera_origin[:2]) / (Pxw[2] - camera_origin[2]) * (zw - camera_origin[2]) + camera_origin[:2]
    return space_points.T
# NOTE(review): this demo block uses Python 2 print statements; the rest
# of the module is version-neutral.
if __name__ == '__main__':
    from pi_robot_API import Communication
    import time
    imgsz = 480
    # Indices of the two keypoints used for the scale sanity check below.
    pointA = 1
    pointB = 5
    camera_pose = np.array([1420, -450, 1180, 1.08, 0.003, -1.77])
    com = Communication() # pi ros communication
    # brick_pos = np.array([0.6, 0, 0.775, 0, 0, -np.pi/6])
    brick_pos = np.array([ 0.6130, -0.1270, 0.7750, 0.0000, 0.0000, 0/180*np.pi])
    com.Set_Object_Pos('hole', brick_pos)
    time.sleep(1)
    # Repack as mm position + reversed rotation for transformation_matrix.
    brick_pos = np.concatenate((brick_pos[:3]*1000, np.array([brick_pos[5], brick_pos[4], brick_pos[3]])))
    Tow = transformation_matrix(brick_pos, False)
    Pxw = Tow.dot(np.c_[brick_points_mm, np.ones(brick_points_mm.shape[0])].T)
    # Drop the first keypoint (the brick centre); keep the corners.
    points = Pxw[:3, 1:].T
    print points
    # peg_coords = get_peg_coord_in_robot_mm(np.array([418.4, 0, 629.89]), np.array([0, -0.5*np.pi, -np.pi]))
    # points = robot2world_m(peg_coords) * 1000
    image = com.Get_image_RGB()
    image = cv2.resize(image, (imgsz, imgsz))
    ProjImgPts = projectPtsToImg(points, camera_pose, imgsz)
    # Round-trip check: back-project the pixels onto the z = 825 plane.
    print get3DPtsFromImg(ProjImgPts[:, 0], 825, camera_pose, imgsz)
    real_distance = np.linalg.norm(brick_points_mm[pointA] - brick_points_mm[pointB])
    img_distance = np.linalg.norm(ProjImgPts[pointA] - ProjImgPts[pointB])
    print 'real distance', real_distance
    print 'image distance', img_distance
    print 'real distance per pixel', real_distance / img_distance
    # Draw all projected points; highlight the two measured ones in red.
    n = 0
    for coord in ProjImgPts[:, 0]:
        if n == pointA or n == pointB:
            color = np.array([0, 0, 255, 255])
        else:
            color = np.array([0, 0, 0, 255])
        cv2.circle(image, tuple(np.round(coord).astype(int)), radius=3, color=color, thickness=2)
        n += 1
    cv2.imshow('image', image)
    cv2.waitKey(0)
| [
"hswong1@uci.edu"
] | hswong1@uci.edu |
2327db29e9f8718074dddfe00384342630510b74 | db58da608831911373c4a5d29ae1f5ec125c1f1d | /apps/users/migrations/0007_merge_20181012_1750.py | 7cf373785a91d9ffc4f51a35c0a63a198e326e54 | [] | no_license | m6ttl/ew_git | f4ff23b9a5df768f966292bbdeeea0c9ae317c12 | dd6ee2d781b5eb305ec212be2fd58b56bf6db8b3 | refs/heads/master | 2022-11-30T23:23:58.711424 | 2019-08-23T09:28:11 | 2019-08-23T09:28:11 | 202,071,075 | 0 | 0 | null | 2022-11-22T02:24:13 | 2019-08-13T05:30:24 | HTML | UTF-8 | Python | false | false | 269 | py | # Generated by Django 2.0.1 on 2018-10-12 17:50
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling two parallel 0006 branches; no schema ops."""

    dependencies = [
        ('users', '0006_auto_20181010_1442'),
        ('users', '0006_auto_20180917_1135'),
    ]
    operations = [
    ]
| [
"steve_wei@163.net"
] | steve_wei@163.net |
85fed18620833a307afb4302f5a3e857877e0f05 | 8402f6ed6dd652afc8bd0ab2110c30509d39fe7a | /lightning/tests/test_lasvm.py | eaf9c360fa19df63cecc7edc8a973ed11d11ee66 | [] | no_license | Raz0r/lightning | 690f8bd99c59c8db38b5c421bb845c9eeda2101b | 06c7ff5d9a98c19564a24bd17032bbf3a61da770 | refs/heads/master | 2021-01-20T23:27:25.745058 | 2012-10-29T12:40:30 | 2012-10-29T12:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal, assert_array_almost_equal, \
assert_almost_equal
from nose.tools import assert_raises, assert_true, assert_equal
from sklearn.datasets.samples_generator import make_classification
from lightning.lasvm import LaSVM
# Shared fixtures: a separable binary problem and a 3-class problem,
# each kept in both dense and CSR-sparse form (random_state pinned).
bin_dense, bin_target = make_classification(n_samples=200, n_features=100,
                                            n_informative=5,
                                            n_classes=2, random_state=0)
bin_sparse = sp.csr_matrix(bin_dense)
mult_dense, mult_target = make_classification(n_samples=300, n_features=100,
                                              n_informative=5,
                                              n_classes=3, random_state=0)
mult_sparse = sp.csr_matrix(mult_dense)
def test_fit_linear_binary():
    """Linear-kernel LaSVM should separate the binary set for every
    working-set selection strategy."""
    for selection, exp in (("permute", 1.0),
                           ("active", 1.0),
                           ("loss", 1.0)):
        clf = LaSVM(random_state=0, max_iter=2, kernel="linear",
                    selection=selection)
        clf.fit(bin_dense, bin_target)
        acc = clf.score(bin_dense, bin_target)
        assert_almost_equal(acc, exp)
def test_fit_rbf_binary():
    """RBF-kernel LaSVM should fit the binary set for every selection strategy."""
    for selection in ("permute", "active", "loss"):
        clf = LaSVM(random_state=0, max_iter=2, kernel="rbf",
                    selection=selection)
        clf.fit(bin_dense, bin_target)
        acc = clf.score(bin_dense, bin_target)
        assert_almost_equal(acc, 1.0)
def test_fit_rbf_multi():
    """RBF-kernel LaSVM should fit the 3-class problem with full accuracy."""
    clf = LaSVM(kernel="rbf", gamma=0.1, random_state=0)
    clf.fit(mult_dense, mult_target)
    y_pred = clf.predict(mult_dense)
    acc = np.mean(y_pred == mult_target)
    assert_almost_equal(acc, 1.0)
def test_warm_start():
    """Refitting with warm_start and a changed C must keep converging."""
    for selection in ("permute", "active", "loss"):
        clf = LaSVM(random_state=0, max_iter=2, kernel="rbf", warm_start=True,
                    selection=selection)
        clf.C = 0.5
        clf.fit(bin_dense, bin_target)
        acc = clf.score(bin_dense, bin_target)
        # Looser tolerance (1 decimal) for the first, weaker-C fit.
        assert_almost_equal(acc, 1.0, 1)
        clf.C = 0.6
        clf.fit(bin_dense, bin_target)
        acc = clf.score(bin_dense, bin_target)
        assert_almost_equal(acc, 1.0)
def test_n_components():
    """Termination by support-set size must yield exactly 30 support vectors."""
    clf = LaSVM(random_state=0, max_iter=2, kernel="rbf", finish_step=True,
                termination="n_components", n_components=30)
    clf.fit(bin_dense, bin_target)
    n_sv = np.sum(clf.coef_ != 0)
    assert_equal(n_sv, 30)
| [
"mathieu@mblondel.org"
] | mathieu@mblondel.org |
21b6ca2d5a5996a215be096064caa1906c792a0a | 09d4e64288e07533679fdc838e456ed3d51e616f | /keepkey_pycoin_bip32utils/bip32test.py | 49d942845b172de488b22102eaf5b7212832a0f7 | [] | no_license | the-schnibble/dash-testnet | 18850e87b037390df6b429688d888b387583f08b | 9d35337e47377e3ed51d72885448faedb7602fdf | refs/heads/master | 2021-01-02T08:30:37.455104 | 2017-08-01T16:08:45 | 2017-08-03T12:28:49 | 99,014,531 | 2 | 1 | null | 2017-08-01T15:11:35 | 2017-08-01T15:11:35 | null | UTF-8 | Python | false | false | 1,802 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://github.com/chaeplin/bip32utils
from bip32utils import BIP32Key
def process_address(desc, address):
    """Print one derived address line.

    Returns None, which process_chain interprets as "address unused".
    """
    print("{0} {1}".format(desc, address))
    return None
def process_chain(desc, chain_node):
    """Walk child addresses of *chain_node* until `gap` consecutive unused ones."""
    index = 0
    unused_run = 0
    while True:
        label = '%s%d' % (desc, index)
        address = chain_node.ChildKey(index).Address()
        if process_address(label, address):
            unused_run = 0
        else:
            unused_run += 1
        if unused_run > gap:
            break
        index += 1
# Account-level extended public key (Dash testnet tpub) to derive from.
xpub = 'tpubDF8GkupYdvTQrsuL6HkCmpSJ7oENkKk9k7cRFuHQWrxca25pSBTq594ZebPxvwzQAdspYh5rd1nKz94TBhP4F2N1SqxqREk4ojXEQYCaYem'
addresses = []  # NOTE(review): never appended to; appears unused.
gap = 10  # stop after this many consecutive "unused" child addresses
acc_node = BIP32Key.fromExtendedKey(xpub)
process_chain('m/', acc_node)
# ./bip32test.py
# m/0 yVUfEs2mdTrVsVRLZg9LoCp8sNBGc3p4FV
# m/1 yfj8WoDP8sJNFSH8vr5pEEsQ8vZ2hCHped
# m/2 yNXtJuijSCNPCbLZbcLHwZoUaD4RzMo19P
# m/3 yQNVQdYosUHkk4wUjxzbLFEXfGqyGJXzXC
# m/4 yTH5axsQ3X8YBiiEKPVY66n9choyEodcKC
# m/5 yNAihSEJQH2hSbnUKRGWcn7LYij56VKPCP
# m/6 yicVWrfJYDFAxUwTbdQnWjTjhre5dx4HBg
# m/7 ySD94FvVzTtYNFmwirK4qE4jhtxjrVsoJ9
# m/8 yRkY4zL4kJr7H7QqMtfDhNtxCeqU2uTqth
# m/9 yQNwssFrbo2CtBrBCHt9D3ttaNLK6xDf7C
# m/10 yPEuaemjx5TBnvQrpEKavrcb8MnL4XGRCA
#
#
# ku -s0-10 -a --override-network tDASH tpubDF8GkupYdvTQrsuL6HkCmpSJ7oENkKk9k7cRFuHQWrxca25pSBTq594ZebPxvwzQAdspYh5rd1nKz94TBhP4F2N1SqxqREk4ojXEQYCaYem
# yVUfEs2mdTrVsVRLZg9LoCp8sNBGc3p4FV
# yfj8WoDP8sJNFSH8vr5pEEsQ8vZ2hCHped
# yNXtJuijSCNPCbLZbcLHwZoUaD4RzMo19P
# yQNVQdYosUHkk4wUjxzbLFEXfGqyGJXzXC
# yTH5axsQ3X8YBiiEKPVY66n9choyEodcKC
# yNAihSEJQH2hSbnUKRGWcn7LYij56VKPCP
# yicVWrfJYDFAxUwTbdQnWjTjhre5dx4HBg
# ySD94FvVzTtYNFmwirK4qE4jhtxjrVsoJ9
# yRkY4zL4kJr7H7QqMtfDhNtxCeqU2uTqth
# yQNwssFrbo2CtBrBCHt9D3ttaNLK6xDf7C
# yPEuaemjx5TBnvQrpEKavrcb8MnL4XGRCA
#
#
| [
"chaeplin@gmail.com"
] | chaeplin@gmail.com |
2b0d899a3c134524e228897572665cbd2c9538c6 | be134c181703b95aca1e48b6a31bcfdb7bcfcc76 | /site/mezzanine_old/generic/migrations/0001_initial.py | 31d4077247ac13758bdd23f109705d4e0c018a28 | [] | permissive | aldenjenkins/ThiccGaming | 0245955a797394bcfeedb2cfb385f633653ba55d | 4790d2568b019438d1569d0fe4e9f9aba008b737 | refs/heads/master | 2022-12-16T02:43:36.532981 | 2021-11-17T04:15:21 | 2021-11-17T04:15:21 | 154,858,818 | 0 | 0 | BSD-3-Clause | 2022-12-08T02:58:44 | 2018-10-26T15:52:39 | Python | UTF-8 | Python | false | false | 3,731 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for mezzanine's ``generic`` app: keywords and their
    per-object assignments, ratings, and threaded comments extending
    django_comments' Comment model."""

    dependencies = [
        ('sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('django_comments', '__first__'),
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        # Link table attaching a Keyword to any content object (generic FK
        # via content_type + object_pk), kept in explicit order.
        migrations.CreateModel(
            name='AssignedKeyword',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('_order', models.IntegerField(null=True, verbose_name='Order')),
                ('object_pk', models.IntegerField()),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
            ],
            options={
                'ordering': ('_order',),
            },
            bases=(models.Model,),
        ),
        # Site-scoped keyword with an optionally auto-generated slug.
        migrations.CreateModel(
            name='Keyword',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=500, verbose_name='Title')),
                ('slug', models.CharField(help_text='Leave blank to have the URL auto-generated from the title.', max_length=2000, null=True, verbose_name='URL', blank=True)),
                ('site', models.ForeignKey(editable=False, to='sites.Site')),
            ],
            options={
                'verbose_name': 'Keyword',
                'verbose_name_plural': 'Keywords',
            },
            bases=(models.Model,),
        ),
        # Per-user rating value attached to any content object.
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.IntegerField(verbose_name='Value')),
                ('rating_date', models.DateTimeField(auto_now_add=True, verbose_name='Rating date', null=True)),
                ('object_pk', models.IntegerField()),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
                ('user', models.ForeignKey(related_name='ratings', verbose_name='Rater', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Rating',
                'verbose_name_plural': 'Ratings',
            },
            bases=(models.Model,),
        ),
        # Comment subclass adding threading (replied_to) and denormalized
        # rating aggregates.
        migrations.CreateModel(
            name='ThreadedComment',
            fields=[
                ('comment_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='django_comments.Comment')),
                ('rating_count', models.IntegerField(default=0, editable=False)),
                ('rating_sum', models.IntegerField(default=0, editable=False)),
                ('rating_average', models.FloatField(default=0, editable=False)),
                ('by_author', models.BooleanField(default=False, verbose_name='By the blog author')),
                ('replied_to', models.ForeignKey(related_name='comments', editable=False, to='generic.ThreadedComment', null=True)),
            ],
            options={
                'verbose_name': 'Comment',
                'verbose_name_plural': 'Comments',
            },
            bases=('django_comments.comment',),
        ),
        # Added after both models exist to break the circular dependency.
        migrations.AddField(
            model_name='assignedkeyword',
            name='keyword',
            field=models.ForeignKey(related_name='assignments', verbose_name='Keyword', to='generic.Keyword'),
            preserve_default=True,
        ),
    ]
| [
"alden@aldenjenkins.com"
] | alden@aldenjenkins.com |
9f8b39a6994e8d684dd21ddf440225ddc1317351 | af6be0921c461143424fc75ed5a55b25ad792952 | /CPANJMIS-master/CPANJMIS-master/project/apps/account/migrations/0013_auto_20190805_1608.py | e1703d18603000555063bf77a18608c98ffde606 | [] | no_license | bobolinbo/bobolearning | c90b4dccdfcec302df9f166a04a75d585e4c6a0b | ac520d9311b15e29c055edb1eb7b3b800d5051ac | refs/heads/master | 2020-08-13T07:24:57.044426 | 2019-10-14T04:11:46 | 2019-10-14T04:11:46 | 214,931,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | # Generated by Django 2.2 on 2019-08-05 16:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make user.department nullable (SET_NULL on delete) and add the
    one-to-one PersonnelProfessionalInfo table with its M2M links."""

    dependencies = [
        ('basicdata', '0022_auto_20190805_1608'),
        ('account', '0012_auto_20190805_1604'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='department',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='basicdata.Department', verbose_name='部门'),
        ),
        migrations.CreateModel(
            name='PersonnelProfessionalInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('authorization', models.ManyToManyField(blank=True, null=True, to='basicdata.AuthorizationClassify', verbose_name='授权列表')),
                ('course_list', models.ManyToManyField(blank=True, null=True, to='basicdata.Course', verbose_name='课程列表')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"1539019002@qq.com"
] | 1539019002@qq.com |
41894cbb7710b238975a5bb2d90f3de9766fabbc | 4d2bb970d9b4aa096272b67d952cfdd3b1e7f75a | /tests/problem_5/test_instances_independence.py | 98923b0e5297c156958a492ae2dbab46d8deabd8 | [] | no_license | ASU-CompMethodsPhysics-PHY494/activity_07_modules_classes | 101c99bc204d12ae3767df81983dadf50548a430 | 1878150fb0143be46fd963552fda2612798dd1d7 | refs/heads/main | 2023-03-02T19:38:43.542813 | 2021-02-09T18:29:14 | 2021-02-09T18:29:29 | 337,378,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # -*- coding: utf-8 -*-
# ASSIGNMENT: Activity 07 (Modules and Classes)
# PROBLEM NUMBER: 5
# place as problem_x/test_name.py so that relative imports work
import pytest
from ..tst import _test_output, assert_python3
# Student script under test and the points this problem is worth.
FILENAME = 'ball_oon.py'
POINTS = 4
def test_python3():
    """The student solution must run under Python 3."""
    assert_python3()
def test_instances_independence():
    """The script must print distinct ball/balloon instances.

    The expected line is matched literally (regex=False).
    """
    return _test_output(FILENAME,
                        r"""ball at (-1, -1, 0) != balloon at (0, 0, 10)""",
                        input_values=None,
                        regex=False)
| [
"orbeckst@gmail.com"
] | orbeckst@gmail.com |
34bc6b2db4ff6317b55d3a95d61ff2eadcad4727 | cd90bbc775cbce9a7e0bc46cbb9437e3961e587f | /python/audioscrob/query_lsi.py | 077cc9d863ff4cc547efc79886b630d9029e0755 | [] | no_license | llimllib/personal_code | 7b3f0483589e2928bf994184e3413f4b887e1f0c | 4d4662d53e0ac293dea8a4208ccca4a1f272e64a | refs/heads/master | 2023-09-05T04:02:05.075388 | 2023-09-01T12:34:09 | 2023-09-01T12:34:09 | 77,958 | 9 | 16 | null | 2023-08-16T13:54:39 | 2008-11-19T02:04:46 | HTML | UTF-8 | Python | false | false | 1,338 | py | #!/usr/bin/env python
import lsi, cPickle, sets, sys
import numarray as na
def load_svd(sfile, ufile, vfile):
    """Load dense SVD factors written by svdlibc.

    *sfile* holds the singular values (a count line, then one value per
    line); *ufile* and *vfile* hold dense matrices (a "rows columns"
    header line, then one whitespace-separated row per line).  Returns
    (U, S, V) where S is the n x n diagonal matrix of singular values.
    """
    def read_matrix(stream):
        n_rows, n_cols = [int(tok) for tok in stream.readline().split()]
        matrix = na.zeros((n_rows, n_cols), type="Float32")
        for r, line in enumerate(stream):
            for c, tok in enumerate(line.split()):
                matrix[r, c] = float(tok)
        return matrix

    n_s = int(sfile.readline())
    S = na.zeros((n_s, n_s), type="Float32")
    for i in range(n_s):
        S[i, i] = float(sfile.readline())
    # The singular-value file must be fully consumed.
    assert sfile.readline() == ''

    U = read_matrix(ufile)
    V = read_matrix(vfile)
    return U, S, V
# Python 2 script body: load the artist/user id maps, read the SVD
# factors, and pickle the LSI term-term similarity matrix.
up = cPickle.Unpickler(file('artist_user.pickle', 'rb'))
artists, users = up.load()
#U is artists
U, S, V = load_svd(file('big_s', 'r'), file('big_ut', 'r'), file('big_vt', 'r'))
#I believe that U is already transposed
# tt = U^T * S^2 * U: pairwise similarities in the reduced LSI space.
tt = na.dot(na.transpose(U), na.dot(na.dot(S, S), U))
fout = file('lsi.out', 'wb')
cp = cPickle.Pickler(fout, -1)
cp.dump(tt)
fout.close()
| [
"llimllib@c4aad215-931b-0410-ba74-c4b90e0b6ad6"
] | llimllib@c4aad215-931b-0410-ba74-c4b90e0b6ad6 |
89be64832c01c67e3c3ac8db4c1d42f4569b7102 | 7673df8dec063e83aa01187d5a02ca8b4ac3761d | /Polymorphism_HTML_Generator.py | d73b1e74699692adb1c529c2946bae120b7142f4 | [] | no_license | jedthompson99/Python_Course | cc905b42a26a2aaf008ce5cb8aaaa6b3b66df61e | 618368390f8a7825459a20b4bc28e80c22da5dda | refs/heads/master | 2023-07-01T08:39:11.309175 | 2021-08-09T17:28:32 | 2021-08-09T17:28:32 | 361,793,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | class Html:
def __init__(self, content):
self.content = content
def render(self):
raise NotImplementedError('Subclass must implement render method')
class Heading(Html):
def render(self):
return f'<h1>{self.content}</h1>'
class Div(Html):
def render(self):
return f'<div>{self.content}</div>'
tags = [
Div('some content'),
Heading('some big heading'),
Div('Another Div')
]
for tag in tags:
print(str(tag) + ': ' + tag.render())
| [
"jedthompson@gmail.com"
] | jedthompson@gmail.com |
44eed9d5f25a1a81fe6b5310bb1293b064ddbde9 | 7673b9c758f40ebdaa22efc22331f8169e763fc8 | /Game/jumpy.py | 8f5acec5772153c56d077a81f8e487038d22ac4f | [] | no_license | eduardogpg/jumpy_game | c47677a3c90ad4b58a2889067e4bfa9d29036a5a | 778cc75fbe94b706048af2e4480bfb9a87b6ae13 | refs/heads/master | 2020-04-02T00:24:50.137965 | 2018-10-20T01:39:30 | 2018-10-20T01:39:30 | 153,803,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,277 | py | from .settings import *
from .player import Player
from .platform import Platform
import random
import pygame
class Jumpy:
def __init__(self):
pygame.init()
pygame.mixer.init()
self.screen = pygame.display.set_mode( (WIDTH, HEIGHT ))
self.clock = pygame.time.Clock()
self.running = True
self.playing = False
pygame.display.set_caption(TITLE)
self.font_name = pygame.font.match_font(FONT_NAME)
def new(self):
self.score = 0
self.player = Player(self, WIDTH / 2, HEIGHT / 2 )
self.sprites = pygame.sprite.Group()
self.platforms = pygame.sprite.Group()
self.sprites.add(self.player)
for platform_config in PLATFORM_LIST:
platform = Platform(*platform_config)
self.sprites.add(platform)
self.platforms.add(platform)
self.playing = True
self.run()
def run(self):
while self.playing:
self.clock.tick(FPS)
self.events()
self.update()
self.draw()
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.player.jump()
def update(self):
self.sprites.update()
#check if player hits a platform - only if falling
if self.player.vel.y > 0:
hits = pygame.sprite.spritecollide(self.player, self.platforms, False)
if hits:
self.player.pos.y = hits[0].rect.top + 1
self.player.vel.y = 0
#if player reaches top 1/4 of screen
if self.player.rect.top <= HEIGHT / 4:
self.player.pos.y += abs(self.player.vel.y)
for plat in self.platforms:
plat.rect.y += abs(self.player.vel.y)
if plat.rect.top >= HEIGHT:
plat.kill()
self.score +=10
#spawn new platforms to keep same overage bumber
while len(self.platforms) < 6:
width = random.randrange(50, 100)
p = Platform(random.randrange(0, WIDTH - width),
random.randrange(-50, -30), width, 20)
self.platforms.add(p)
self.sprites.add(p)
#Die!
if self.player.rect.bottom > HEIGHT:
for sprite in self.sprites:
sprite.rect.y -= max(self.player.vel.y , 10)
if sprite.rect.bottom < 0:
sprite.kill()
if len(self.platforms) == 0:
self.playing = False
self.new()
def draw_text(self, text, size, color, x, y):
font = pygame.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
def draw(self):
self.screen.fill(BLACK)
self.sprites.draw(self.screen)
self.draw_text(str(self.score), 22, WHITE, WIDTH / 2, 15)
pygame.display.flip()
| [
"eduardo78d@gmail.com"
] | eduardo78d@gmail.com |
27136f2514a2069bf36aef18602f4a632bd6304f | 3bcc247a2bc1e0720f0344c96f17aa50d4bcdf2d | /第一阶段笔记/程序/shebao.py | 19e17a28f27b34902da9e197bb5d7b5d8eb26429 | [] | no_license | qianpeng-shen/Study_notes | 6f77f21a53266476c3c81c9cf4762b2efbf821fa | 28fb9a1434899efc2d817ae47e94c31e40723d9c | refs/heads/master | 2021-08-16T19:12:57.926127 | 2021-07-06T03:22:05 | 2021-07-06T03:22:05 | 181,856,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,669 | py |
def shu():
print("0) 进入计算")
print("1) 退出计算")
def sum():
s=int(input("请输入你在北京的社保基数(3082~23118):"))
if 3082<= s <=23118:
gy=s*0.008
dy=s*0.19
gh=input("户口是否为城镇(yes/no):")
if gh=="yes" :
gs=s*0.002
ds=s*0.008
else :
gz=s*0
dz=s*0.008
gg=s*0
dg=s*0.005
ge=s*0
de=s*0.008
gl=s*0.02+3
dl=s*0.1
gj=s*0.12
dj=s*0.12
if gh=="yes":
sum_g=gy+gs+gg+ge+gl+gj
sum_d=dy+ds+dg+de+dl+dj
else :
sum_g=gy+gz+gg+ge+gl+gj
sum_d=dy+dz+dg+de+dl+dj
print(" 个人缴费比例 "," 单位缴费比例 ")
print("养老: ",gy, " ",dy )
if gh=="yes":
print("失业(城镇): ",gs," ",ds )
else:
print("失业(农村): ",gz," ",dz )
print("工伤: ",gg," ",dg )
print("生育: ",ge," ",de )
print("医疗: ",gl," ",dl )
print("公积金: ",gj," ",dj )
if gh=="yes":
print("总和: ",sum_g," ",sum_d)
else:
print("总和 : ",sum_g," ",sum_d)
else :
print("输入有误,请重新选择")
def ji():
while True:
shu()
p=int(input("请选择:"))
if p==0:
sum()
else :
break
ji() | [
"shenqianpeng@chengfayun.com"
] | shenqianpeng@chengfayun.com |
2cd5f5426bc43a0bc84ccfce6ac8effb870a32ed | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/layout/scene/yaxis/_title.py | b63b7b0849f6244fbb62a192ed6748838f3e59d9 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 981 | py | import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(
self, plotly_name='title', parent_name='layout.scene.yaxis', **kwargs
):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Title'),
data_docs=kwargs.pop(
'data_docs', """
font
Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
text
Sets the title of this axis. Note that before
the existence of `title.text`, the title's
contents used to be defined as the `title`
attribute itself. This behavior has been
deprecated.
"""
),
**kwargs
)
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
d8ed3b3d2c655e907769e42ef9df45539f087ca7 | c105797a5b6f5aca0b892ccdadbb2697f80fb3ab | /python_base/base15/page/contact_add.py | 94bf9599ce701086eeef43402d08d559ca44f34c | [] | no_license | jj1165922611/SET_hogwarts | 6f987c4672bac88b021069c2f947ab5030c84982 | fbc8d7363af0a4ac732d603e2bead51c91b3f1f7 | refs/heads/master | 2023-01-31T19:41:27.525245 | 2020-12-15T13:43:45 | 2020-12-15T13:43:45 | 258,734,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020-08-19
# @Author : Joey Jiang
# @File : contact_add.py
# @Software : PyCharm
# @Description: 企业微信移动app实战
class ContactAddPage:
def set_name(self):
return self
def set_gender(self):
return self
def set_phonenum(self):
return self
def click_save(self):
from python_base.base15.page.invite_member import InviteMemberPage
return InviteMemberPage() | [
"1165922611@qq.com"
] | 1165922611@qq.com |
f281f525635636d1ed853611666a30eea9df78dc | 1edfe2fe5ae6ed81d1453446569b8fa594738cb9 | /vendor/migrations/0017_auto_20200406_2113.py | a0d6306cadabcc40cc07c28b526785d7c15dd750 | [] | no_license | parvatiandsons2/djangoproject | 71bfbcfa6b06406b8a77ebb30f7d468d787e92dd | 901062c839dde608d9c2c865b61dbc0902988e66 | refs/heads/master | 2022-04-28T21:53:17.333272 | 2020-04-16T12:26:29 | 2020-04-16T12:26:29 | 255,407,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # Generated by Django 3.0.3 on 2020-04-06 15:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendor', '0016_auto_20200406_2027'),
]
operations = [
migrations.AlterField(
model_name='vendor',
name='created_on',
field=models.DateField(default=datetime.datetime(2020, 4, 6, 21, 13, 12, 358322), editable=False, verbose_name='Created On'),
),
]
| [
"parvatiandsons2@gmail.com"
] | parvatiandsons2@gmail.com |
98a41b81268bf6fd96b82174f324e081f5420c49 | 9dee94907e6456a4af9855d358693923c17b4e0d | /1036_Escape_a_Large_Maze.py | d1155bcc42a941421149dd5533ec39b085749b92 | [] | no_license | chien-wei/LeetCode | e215915a8103e56f182040dacc9fb0d6996c86ec | 0d6f414e7610fedb2ec4818ecf88d51aa69e1355 | refs/heads/master | 2021-05-13T14:48:22.891100 | 2019-08-20T05:52:59 | 2019-08-20T05:52:59 | 116,749,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | class Solution:
def isEscapePossible(self, blocked: List[List[int]], source: List[int], target: List[int]) -> bool:
# BFS
bound = 10 ** 6
step_left = len(blocked)
que = [source]
visited = set(tuple(source))
blocked = set(map(tuple, blocked))
while len(que) > 0 and step_left >= 0:
for _ in range(len(que)):
i, j = que.pop(0)
for (x, y) in [(i+1, j), (i-1, j), (i, j+1), (i, j-1)]:
if x >= 0 and x < bound and y >= 0 and y < bound and (x, y) not in blocked and (x, y) not in visited:
if [x, y] == target:
return True
que.append((x, y))
visited.add((x, y))
step_left -= 1
if step_left <= 0:
return True
return False | [
"chien-wei@outlook.com"
] | chien-wei@outlook.com |
5392d476cc5dbf1251c2f97ab77785c857c4811f | 7e62c0c2572ca586e37ba754bccb6c231e08bc1e | /beartype/_decor/_code/_pep/_error/_peperrorgeneric.py | b6916fe1b62384d10135143fae4445f4e6fc040a | [
"MIT"
] | permissive | vault-the/beartype | 1a456ae18fc3814a19d3c1505ecba19e309ce57e | 36ab39df2a7a89ae52c8016c4226c8aa582b390a | refs/heads/master | 2023-02-03T19:39:49.061471 | 2020-12-23T04:57:13 | 2020-12-23T04:57:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,870 | py | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2020 Cecil Curry.
# See "LICENSE" for further details.
'''
**Beartype PEP-compliant type hint call-time utilities** (i.e., callables
operating on PEP-compliant type hints intended to be called by dynamically
generated wrapper functions wrapping decorated callables).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype._decor._code._pep._error._peperrortype import (
get_cause_or_none_type)
from beartype._decor._code._pep._error._peperrorsleuth import CauseSleuth
from beartype._util.hint.utilhinttest import is_hint_ignorable
from beartype._util.hint.pep.proposal.utilhintpep484 import (
get_hint_pep484_generic_base_erased_from_unerased)
from beartype._util.hint.pep.proposal.utilhintpep585 import is_hint_pep585
from beartype._util.hint.pep.utilhintpeptest import is_hint_pep_typing
from typing import Generic
# See the "beartype.__init__" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ GETTERS }....................
def get_cause_or_none_generic(sleuth: CauseSleuth) -> 'Optional[str]':
'''
Human-readable string describing the failure of the passed arbitrary object
to satisfy the passed `PEP 484`_-compliant **generic** (i.e., type hint
subclassing a combination of one or more of the :mod:`typing.Generic`
superclass, the :mod:`typing.Protocol` superclass, and/or other
:mod:`typing` non-class pseudo-superclasses) if this object actually fails
to satisfy this hint *or* ``None`` otherwise (i.e., if this object
satisfies this hint).
Parameters
----------
sleuth : CauseSleuth
Type-checking error cause sleuth.
'''
assert isinstance(sleuth, CauseSleuth), f'{repr(sleuth)} not cause sleuth.'
assert isinstance(sleuth.hint, type), f'{repr(sleuth.hint)} not class.'
assert sleuth.hint_sign is Generic, (
f'{repr(sleuth.hint_sign)} not generic.')
# If this pith is *NOT* an instance of this generic, defer to the getter
# function handling non-"typing" classes.
if not isinstance(sleuth.pith, sleuth.hint):
return get_cause_or_none_type(sleuth)
# Else, this pith is an instance of this generic.
# For each pseudo-superclass of this generic...
for hint_base in sleuth.hint_childs:
# If this pseudo-superclass is an actual superclass, this
# pseudo-superclass is effectively ignorable. Why? Because the
# isinstance() call above already type-checked this pith against the
# generic subclassing this superclass and thus this superclass as well.
# In this case, skip to the next pseudo-superclass.
if isinstance(hint_base, type):
continue
# Else, this pseudo-superclass is *NOT* an actual class.
#
# If this pseudo-superclass is neither a PEP 585-compliant type hint
# *NOR* a PEP-compliant type hint defined by the "typing" module,
# reduce this pseudo-superclass to a real superclass originating this
# pseudo-superclass. See commentary in the "_pephint" submodule.
elif not (is_hint_pep585(hint_base) and is_hint_pep_typing(hint_base)):
hint_base = get_hint_pep484_generic_base_erased_from_unerased(
hint_base)
# Else, this pseudo-superclass is defined by the "typing" module.
# If this superclass is ignorable, do so.
if is_hint_ignorable(hint_base):
continue
# Else, this superclass is unignorable.
# Human-readable string describing the failure of this pith to satisfy
# this pseudo-superclass if this pith actually fails to satisfy
# this pseudo-superclass *or* "None" otherwise.
# print(f'tuple pith: {pith_item}\ntuple hint child: {hint_child}')
pith_base_cause = sleuth.permute(hint=hint_base).get_cause_or_none()
# If this pseudo-superclass is the cause of this failure, return a
# substring describing this failure by embedding this failure (itself
# intended to be embedded in a longer string).
if pith_base_cause is not None:
# print(f'tuple pith: {sleuth_copy.pith}\ntuple hint child: {sleuth_copy.hint}\ncause: {pith_item_cause}')
return f'generic base {repr(hint_base)} {pith_base_cause}'
# Else, this pseudo-superclass is *NOT* the cause of this failure.
# Silently continue to the next.
# Return "None", as this pith satisfies both this generic itself *AND* all
# pseudo-superclasses subclassed by this generic, implying this pith to
# deeply satisfy this hint.
return None
| [
"leycec@gmail.com"
] | leycec@gmail.com |
4e24d5ca8043193ed1a0a31cfc08eca50d55b164 | a9cd0f58ff76ab4b1c65847247fa212b7375f2db | /app/comments/forms.py | 9bbc80888868e54fb548094a9f2718ed27689c4a | [] | no_license | wisnercelucus/ls-sms-saas-advance | 11c32bebba2498653b465b405bcdeaf71804df4c | d465939e11a9461e4425b67dc8be0f9da22bb1a8 | refs/heads/master | 2022-06-29T04:06:50.814004 | 2020-05-09T17:27:50 | 2020-05-09T17:27:50 | 260,078,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | from django import forms
class CommentForm(forms.Form):
content_type = forms.CharField(widget=forms.HiddenInput)
object_id = forms.IntegerField(widget=forms.HiddenInput)
#parent_id = forms.IntegerField(widget=forms.HiddenInput)
content = forms.CharField(widget=forms.Textarea)
| [
"wisnercelicus@gmail.com"
] | wisnercelicus@gmail.com |
3a6a8467aeb29d16bfa252404455c2a04d8f4e78 | c71e5115b895065d2abe4120799ffc28fa729086 | /procon-archive/atcoder.jp/abc146/abc146_b/Main.py | 054237241e3d8734442171d9aa60b29b6b5ce2af | [] | no_license | ken0105/competitive-programming | eb82f92a7b7ad0db601ea341c1441de6c6165064 | f918f85a0ea6dfbe9cac3ef835f80503bb16a75d | refs/heads/master | 2023-06-05T09:55:25.264731 | 2021-06-29T14:38:20 | 2021-06-29T14:38:20 | 328,328,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | import math
from decimal import *
import numpy as np
from collections import deque, Counter
import itertools
if __name__ == '__main__':
n = int(input())
s = list(input())
for i in range(len(s)):
if ord(s[i]) + n > ord("Z"):
s[i] = chr(ord(s[i]) + n - 26)
else:
s[i] = chr(ord(s[i]) + n)
ans = ""
for i in s:
ans += i
print(ans)
| [
"iwata.kenaaa@gmail.com"
] | iwata.kenaaa@gmail.com |
96f1ce56d7dcad95aca5eb6112b0bc3da040a31b | 2b07665c5c6a84c2604f97f85adff6976d6b01fb | /txgsm/protocol.py | 6c1ac70f988567dc0442ba3e83c3a447da3fecca | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | smn/txgsm | f3259d7345752f2c0d8194331dd53e0f536ef9a1 | 5ee35ee4dc8dec40a6bc2023744e7c8745e38c1d | refs/heads/develop | 2020-04-06T03:33:53.444260 | 2017-01-24T03:40:11 | 2017-01-24T03:40:11 | 10,579,937 | 12 | 7 | BSD-3-Clause | 2023-06-19T17:44:28 | 2013-06-09T07:31:48 | Python | UTF-8 | Python | false | false | 4,396 | py | # -*- test-case-name: txgsm.tests.test_protocol -*-
# -*- coding: utf-8 -*-
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
from twisted.internet.defer import Deferred
from twisted.python import log
from .utils import quote
from messaging.sms import SmsSubmit, SmsDeliver
class TxGSMProtocol(LineReceiver):
CTRL_Z = '\x1a'
delimiter = '\r\n'
verbose = False
def __init__(self):
# AT switches being purely line oriented and sometimes not
# especially when sending multipart SMSs which has a '^> $' prompt
# without a '\r\n' that we need to wait for.
# As a result using lineReceived() does not always work.
self.setRawMode()
self.deferreds = []
self.buffer = b''
def log(self, msg):
if self.verbose:
log.msg(msg)
def connectionMade(self):
self.log('Connection made')
def send_command(self, command, expect='OK', timeout=None):
self.log('Sending: %r' % (command,))
resp = Deferred()
resp.addCallback(self.debug)
if timeout:
reactor.callLater(timeout, resp.cancel)
self.deferreds.append((command, expect, resp))
self.sendLine(command)
return resp
def debug(self, resp):
self.log('Received: %r' % (resp,))
return resp
def next(self, command, expect='OK'):
def handler(result):
d = self.send_command(command, expect)
d.addCallback(lambda r: result + [r])
return d
return handler
def configure_modem(self):
# Sensible defaults shamelessly copied from pygsm.
d = Deferred()
d.addCallback(self.next('ATE0')) # Disable echo
d.addCallback(self.next('AT+CMGF=0')) # PDU mode
d.addCallback(self.next('AT+CMEE=1')) # More useful errors
d.addCallback(self.next('AT+CSMS=1')) # set SMS mode to phase 2+
d.callback([])
return d
def send_sms(self, msisdn, text):
sms = SmsSubmit(msisdn, text)
# NOTE: The use of the Deferred here is a bit wonky
# I'm using it like this because it makes adding callbacks
# in a for-loop easier since we're potentially sending
# SMSs bigger than 160 chars.
d = Deferred()
for pdu in sms.to_pdu():
d.addCallback(self.next(
'AT+CMGS=%d' % (pdu.length,),
expect='> '))
d.addCallback(self.next('%s%s' % (pdu.pdu, self.CTRL_Z)))
d.callback([])
return d
def dial_ussd_code(self, code):
return self.send_command('AT+CUSD=1,"%s",15' % (quote(code),),
expect='+CUSD')
def list_received_messages(self, status=4):
d = self.send_command('AT+CMGL=%i' % (status,))
def parse_cmgl_response(result):
response = result['response']
# Lines alternative between the +CMGL response and the
# actual PDU containing the SMS
found = False
messages = []
for line in response:
if line.startswith('+CMGL:'):
found = True
elif found:
messages.append(SmsDeliver(line))
found = False
return messages
d.addCallback(parse_cmgl_response)
return d
def probe(self):
"""
See if we're talking to something GSM-like and if so,
try and get some useful information out of it.
"""
d = Deferred()
d.addCallback(self.next('ATE0'))
d.addCallback(self.next('AT+CIMI'))
d.addCallback(self.next('AT+CGMM'))
reactor.callLater(0, d.callback, [])
return d
def rawDataReceived(self, data):
self.buffer += data
if not self.deferreds:
log.err('Unsollicited response: %r' % (data,))
return
_, expect, _ = self.deferreds[0]
if expect in self.buffer:
command, expect, deferred = self.deferreds.pop(0)
return_buffer, self.buffer = self.buffer, b''
result = {
'command': [command],
'expect': expect,
'response': filter(None, return_buffer.split(self.delimiter))
}
deferred.callback(result)
| [
"simon@praekeltfoundation.org"
] | simon@praekeltfoundation.org |
7fae1b15f91d81ef695953a46a052ab14d943d09 | 33f1c49920201e21adaf794c826148d0330db4a1 | /python/dp/117_jump_game_ii.py | 0d49f27baf32e21c3532cd563d068cc8d4110369 | [] | no_license | zsmountain/lintcode | 18767289566ccef84f9b32fbf50f16b2a4bf3b21 | 09e53dbcf3b3dc2b51dfb343bf77799632efd219 | refs/heads/master | 2020-04-04T21:35:07.740575 | 2019-03-16T20:43:31 | 2019-03-16T20:43:31 | 156,291,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | '''
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
Have you met this question in a real interview?
Example
Given array A = [2,3,1,1,4]
The minimum number of jumps to reach the last index is 2. (Jump 1 step from index 0 to 1, then 3 steps to the last index.)
'''
import math
class Solution:
"""
@param A: A list of integers
@return: An integer
"""
def jump(self, A):
# write your code here
n = len(A)
dp = [math.inf for _ in range(n)]
dp[0] = 0
for i in range(n):
if dp[i] == math.inf:
continue
for j in range(A[i]):
if i + j + 1 >= n:
break
dp[i+j+1] = min(dp[i+j+1], dp[i] + 1)
return dp[-1]
s = Solution()
print(s.jump([2, 3, 1, 1, 4])) | [
"zsmountain27@gmail.com"
] | zsmountain27@gmail.com |
9b07c8deaf09327871b26d7ff2963b55c3a77696 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/radialaxis/_ticksuffix.py | 40442b42e7ab24400f454a332668a7cb1cdd3b92 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 455 | py | import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='ticksuffix',
parent_name='layout.radialaxis',
**kwargs
):
super(TicksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
**kwargs
)
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
6fa2530c5ef358c0adaa6cec3b04535da0452154 | 486820178701ecb337f72fd00cd2e281c1f3bbb2 | /teuthology_master/virtualenv/bin/dynamodb_dump | 98f14105e218197be6337df38abe6ff012ad3e47 | [
"MIT"
] | permissive | hgichon/anycloud-test | 9e0161bc563a20bd048ecff57ad7bf72dcb1d420 | 0d4cd18d8b6bb4dcf1b59861fea21fefe6a2c922 | refs/heads/master | 2016-09-11T09:32:23.832032 | 2015-06-24T00:58:19 | 2015-06-24T00:58:19 | 35,654,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | #!/home/teuthworker/src/teuthology_master/virtualenv/bin/python
import argparse
import errno
import os
import boto
from boto.compat import json
DESCRIPTION = """Dump the contents of one or more DynamoDB tables to the local filesystem.
Each table is dumped into two files:
- {table_name}.metadata stores the table's name, schema and provisioned
throughput.
- {table_name}.data stores the table's actual contents.
Both files are created in the current directory. To write them somewhere else,
use the --out-dir parameter (the target directory will be created if needed).
"""
def dump_table(table, out_dir):
metadata_file = os.path.join(out_dir, "%s.metadata" % table.name)
data_file = os.path.join(out_dir, "%s.data" % table.name)
with open(metadata_file, "w") as metadata_fd:
json.dump(
{
"name": table.name,
"schema": table.schema.dict,
"read_units": table.read_units,
"write_units": table.write_units,
},
metadata_fd
)
with open(data_file, "w") as data_fd:
for item in table.scan():
# JSON can't serialize sets -- convert those to lists.
data = {}
for k, v in item.iteritems():
if isinstance(v, (set, frozenset)):
data[k] = list(v)
else:
data[k] = v
data_fd.write(json.dumps(data))
data_fd.write("\n")
def dynamodb_dump(tables, out_dir):
try:
os.makedirs(out_dir)
except OSError as e:
# We don't care if the dir already exists.
if e.errno != errno.EEXIST:
raise
conn = boto.connect_dynamodb()
for t in tables:
dump_table(conn.get_table(t), out_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="dynamodb_dump",
description=DESCRIPTION
)
parser.add_argument("--out-dir", default=".")
parser.add_argument("tables", metavar="TABLES", nargs="+")
namespace = parser.parse_args()
dynamodb_dump(namespace.tables, namespace.out_dir)
| [
"hgichon@gmail.com"
] | hgichon@gmail.com | |
c0ffe136fb21ccfde99bf631f6ac8b64fa28cc7b | b027bb7d56ddde072aac411365d2c39d1f798fdb | /fig5.py | 8add7894b9423b66cfb3fe611bfb9918a63c6082 | [] | no_license | ModelDBRepository/185355 | 36d96a7246b0890295bfacd8bcb8849b9d51008d | 3f9dcaa5cdf43ab6a45e3ad5c988268abc6434bc | refs/heads/master | 2022-08-09T20:16:09.371047 | 2022-05-27T15:49:36 | 2022-05-27T15:49:36 | 189,301,388 | 0 | 2 | null | 2022-04-11T20:26:13 | 2019-05-29T21:26:20 | AMPL | UTF-8 | Python | false | false | 625 | py | ### Analysis of DG network data ###
# This Python code creates a scatter plot of output vs input sim scores.
# Enter the idname
# ModelDB file along with publication:
# Yim MY, Hanuschkin A, Wolfart J (2015) Hippocampus 25:297-308.
# http://onlinelibrary.wiley.com/doi/10.1002/hipo.22373/abstract
# modified and augmented by
# Man Yi Yim / 2015
# Alexander Hanuschkin / 2011
idname = "-pp16-gaba4-kir4-st10"
execfile('plot_DG_all.py')
execfile('GCinput.py')
execfile('inout_pattern.py')
execfile('sim_score.py')
print 'Set the idname as ', idname, ' in FitSimScore_ForallFigs.m and run the file in Matlab for data fitting' | [
"tom.morse@yale.edu"
] | tom.morse@yale.edu |
836ef21d26e7b5dc95fd5d5535b8894dc9071f69 | e43992fc43701df30b75002831265b19fae13f21 | /code_examples/popart/block_sparse/examples/mnist/conftest.py | 10e2b38ba57646650051b8684c2a4b6ded8558c9 | [
"MIT"
] | permissive | techthiyanes/examples | 9e6143f4e6efb935d7d20588f25dd50bf457ccea | 788ead557c9d9eaee1cea7fea516696b188eb841 | refs/heads/master | 2023-07-08T07:16:00.843617 | 2021-08-18T13:49:48 | 2021-08-18T13:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import sys
from common import download_mnist
parent_dir = os.path.join(os.path.dirname(__file__), "..")
sys.path.append(parent_dir)
from utils import build_custom_ops
so_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../../custom_ops.so")
build_custom_ops(so_path)
download_mnist(os.path.dirname(__file__))
| [
"philb@graphcore.ai"
] | philb@graphcore.ai |
a50de666aea61cb5d6e800f794145d0e6e72e32d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1482494_0/Python/shaunren/p2.py | d02d16128d891683e70cb0b44749866b7904ca3e | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #!/usr/bin/env python
t = int(input())
for case in range(1,t+1):
n = int(input())
lvls = []
played = [[False,False] for _ in range(n)]
for i in range(n):
a,b = map(int,input().strip().split())
lvls.append((b,a,i))
lvls.sort()
total = 0
stars = 0
while len(lvls):
for k in range(len(lvls)):
b,a,i = lvls[k]
if stars >= b and not played[i][1]:
#print(b, stars)
#print('Play L{} Star2'.format(i))
total += 1
stars += (1 if played[i][0] else 2)
played[i][0] = played[i][1] = True
del lvls[k]
break
elif stars >= a and not played[i][0]:
#print('Play L{} Star1'.format(i))
total += 1
stars += 1
played[i][0] = True
break
else:
total = 'Too Bad'
break
print('Case #{}: {}'.format(case, total))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
f79aa18cce93645b6994ba54e85405c605a5482f | d0609d9d508a9bfbcd0f43d43a10fbfda2e5b7cf | /sparse_coding/gpu/ke_sparse_coding_pytorch/EE290T_quantized_sparse_codes/analysis_transforms/ista.py | 867c2e3ab570b26ed32aa17763182c154c2765ec | [
"BSD-3-Clause"
] | permissive | csinva/dnn-experiments | cf66579d282c0cbbadc1a84333f88b9a55b4e8ba | 5b418cb125c4eb0b510925c7a70fd87ce5978841 | refs/heads/master | 2021-07-11T00:40:30.237545 | 2020-06-13T22:39:36 | 2020-06-13T22:39:36 | 143,078,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | """
Implementation of Iterative Soft Thresholding
"""
import torch
from torch.autograd import Variable
def run(images, dictionary, sparsity_weight, max_num_iters,
convergence_epsilon=1e-3, nonnegative_only=False):
"""
Runs steps of Iterative Soft Thresholding w/ constant stepsize
Termination is at the sooner of 1) code changes by less then
convergence_epsilon, (per component, normalized by stepsize, on average)
or 2) max_num_iters have been taken.
Parameters
----------
images : torch.Tensor(float32, size=(n, b))
An array of images (probably just small patches) that to find the sparse
code for. n is the size of each image and b is the number of images in
this batch
dictionary : torch.Tensor(float32, size=(n, s))
This is the dictionary of basis functions that we can use to descibe the
images. n is the size of each image and s in the size of the code.
sparsity_weight : torch.Tensor(float32)
This is the weight on the sparsity cost term in the sparse coding cost
function. It is often denoted as \lambda
max_num_iters : int
Maximum number of steps of ISTA to run
convergence_epsilon : float, optional
Terminate if code changes by less than this amount per component,
normalized by stepsize. Default 1e-3.
nonnegative_only : bool, optional
If true, our code values can only be nonnegative. We just chop off the
left half of the ISTA soft thresholding function and it becomes a
shifted RELU function. The amount of the shift from a generic RELU is
precisely the sparsity_weight. Default False
Returns
-------
codes : torch.Tensor(float32, size=(s, b))
The set of codes for this set of images. s is the code size and b in the
batch size.
"""
# Stepsize set by the largest eigenvalue of the Gram matrix. Since this is
# of size (s, s), and s >= n, we want to use the covariance matrix
# because it will be of size (n, n) and have the same eigenvalues
# ** For LARGE values of d = min(s, n), this will become a computational
# bottleneck. Consider setting lipschitz constant based on the
# backtracking rule outlined in Beck and Teboulle, 2009.
lipschitz_constant = torch.symeig(
torch.mm(dictionary, dictionary.t()))[0][-1]
stepsize = 1. / lipschitz_constant
# codes = images.new_zeros(dictionary.size(1), images.size(1))
codes = Variable(torch.zeros((dictionary.size(1), images.size(1)))).cuda()
old_codes = codes.clone()
avg_per_component_change = torch.mean(torch.abs(codes - old_codes))
iter_idx = 0
while (iter_idx < max_num_iters and
(avg_per_component_change > convergence_epsilon or iter_idx == 0)):
old_codes = codes.clone()
# gradient of l2 term is <dictionary^T, (<dictionary, codes> - images)>
codes.sub_(stepsize * torch.mm(dictionary.t(),
torch.mm(dictionary, codes) - images))
#^ pre-threshold values x - lambda*A^T(Ax - y)
if nonnegative_only:
codes.sub_(sparsity_weight * stepsize).clamp_(min=0.)
#^ shifted rectified linear activation
else:
pre_threshold_sign = torch.sign(codes)
codes.abs_()
codes.sub_(sparsity_weight * stepsize).clamp_(min=0.)
codes.mul_(pre_threshold_sign)
#^ now contains the "soft thresholded" (non-rectified) output
avg_per_component_change = torch.mean(torch.abs(codes - old_codes) /
stepsize)
iter_idx += 1
return codes
| [
"chandan_singh@berkeley.edu"
] | chandan_singh@berkeley.edu |
4b72e132593b28e159d0b3e0f261b5b0eeb0271c | 2abfbb36a9782c7c3835d22432c4e6bf7486a5a2 | /K-means_with_SciPy/K_means_with_SciPy.py | 9bde6a4160f6e7edfaff39be5374c9600634f9e5 | [] | no_license | iamraufodilov/K-means_with_SciPy | a679d3f4aaf72cd1ca420c4de6f7477af7f3a3e3 | 7db3cfa4cd56d2bf139337bf7e70d6d9aa7a45cf | refs/heads/master | 2023-03-19T19:40:03.929650 | 2021-03-09T07:40:55 | 2021-03-09T07:40:55 | 345,922,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | #import k means
from scipy.cluster.vq import kmeans, vq, whiten
from numpy import vstack, array
from numpy.random import rand

# Two loose clusters of 100 three-feature points each: one shifted by +0.5
# on every axis, one left inside the unit cube.
data = vstack((rand(100, 3) + array([.5, .5, .5]), rand(100, 3)))

# Scale every feature to unit variance, as scipy's kmeans expects.
data = whiten(data)

# Fit three cluster centroids and show them.
centroids, _ = kmeans(data, 3)
print(centroids)

# Tag each observation with the index of its nearest centroid.
clx, _ = vq(data, centroids)
print(clx)
| [
"applyjobrauf@gmail.com"
] | applyjobrauf@gmail.com |
b6841fc285b38e52d887a65af68e3677455bcc61 | 25613ba34d077a6b4e328dd81cb60a597e62d0fb | /ajaxify/migrations/0001_initial.py | a5e5bde19359d898115118bf797942b9f613bacb | [] | no_license | MahfuzKhandaker/ajaxdjango | 9e10fa2f18d71f0088bf9f5db6c499f07f320b34 | 30aa417bb6a1e7a9c357e8e56bf31a514668ceb6 | refs/heads/main | 2023-04-11T10:11:41.076493 | 2021-04-09T14:45:03 | 2021-04-09T14:45:03 | 354,635,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | # Generated by Django 3.2 on 2021-04-07 16:52
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ajaxify app (Category, Post)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Blog category: unique, indexed name, listed name-descending.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=20, unique=True)),
            ],
            options={
                'verbose_name_plural': 'categories',
                'ordering': ['-name'],
            },
        ),
        # Blog post with publishing metadata, view counter, M2M categories
        # and per-user likes; newest posts first.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('main_image', models.ImageField(blank=True, upload_to='images/')),
                ('title', models.CharField(max_length=125)),
                ('slug', models.SlugField(unique=True)),
                ('summary', models.CharField(blank=True, max_length=255, null=True)),
                ('description', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('last_modified', models.DateTimeField(auto_now=True)),
                ('read_time', models.IntegerField(default=0)),
                ('number_of_views', models.IntegerField(blank=True, default=0, null=True)),
                ('is_featured', models.BooleanField(default=False)),
                ('categories', models.ManyToManyField(related_name='posts', to='ajaxify.Category')),
                ('likes', models.ManyToManyField(blank=True, related_name='post_likes', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'posts',
                'ordering': ['-created_on'],
            },
        ),
        migrations.AddIndex(
            model_name='post',
            index=models.Index(fields=['id'], name='id_index'),
        ),
    ]
| [
"mahfuzkhandaker.bd@gmail.com"
] | mahfuzkhandaker.bd@gmail.com |
aae73de91c7205183a32a3ab0bcfc068e0f142e2 | 86871876283b07e4308d716060ed5908f54da785 | /backend/users/models.py | 4d99f970a7195e864f7c0e6c1c62d144a325e7c2 | [] | no_license | crowdbotics-apps/mobile-200-dev-7931 | 1183dfca569b6a504824aa499c5edd1cdce9af34 | b9f81e49911198521f0eeab348d92664d7dad120 | refs/heads/master | 2022-11-24T12:09:29.874411 | 2020-07-20T13:57:30 | 2020-07-20T13:57:30 | 281,034,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
    """Project user model extending Django's built-in AbstractUser."""
    # Optional human-readable display name.
    name = models.CharField(null=True, blank=True, max_length=255,)
    def get_absolute_url(self):
        """Return the canonical detail-page URL for this user."""
        return reverse("users:detail", kwargs={"username": self.username})
class Gjhuyuit(models.Model):
    # NOTE(review): machine-generated placeholder model; both the model and
    # field names look auto-generated -- confirm whether it is still needed.
    "Generated Model"
    hfhfhgf = models.BigIntegerField()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
851f2c3f7e9147e6c23df8791eb95178ec34f663 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/393/usersdata/315/72418/submittedfiles/swamee.py | 704a1282b6dfccfa9f3dcbbd59c0779bee531c42 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
# CONSTANTES
g = 9.81
e = 0.000002
#ENTRADAS
f = float(input('Digite valor de f: '))
L = float(input('Digite valor de L: '))
Q = float(input('Digite valor de Q: '))
DH = float(input('Digite valor de DH: '))
v = float(input('Digite valor de v: '))
#FORMULA PARA D
x = (8*f*L*(Q**2))
y = ((math.pi**2)*g*DH)
D = ((8*f*L*(Q**2))/((math.pi**2)*g*DH))**(1/5)
#D = (x/y)**(1/5)
#FORMULA PARA REY
Rey = (4*Q)/(math.pi*D*v)
#FORMULA PARA K
z = e/3.7*D
p = 5.74/(Rey**0.9)
k = 0.25/(math.log10(z+p))**2
#SAIDAS
print ('%.4f' %D)
print ('%.4f' %Rey)
print ('%.4f' %k)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9e39d28f27026a466e6195157821578973cf2da3 | 90ec137d760e2c5f094e82f0f4456c04f5ac98dc | /tests/__init__.py | b1f3643451e167af86f2c62ca6facca8bbdd373c | [] | no_license | d0nin380/big-brother-bot | 8adc5d35e37e6eb9f6b67e431072e596a24211ef | 949aa0b0c82658795eea43474d220bfbaaba861f | refs/heads/master | 2021-01-18T04:33:19.093850 | 2013-03-25T02:34:59 | 2013-03-25T02:34:59 | 8,996,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | #
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2011 Courgette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import logging
import threading
import sys
from b3.config import XmlConfigParser
import b3.output # do not remove, needed because the module alters some defaults of the logging module
log = logging.getLogger('output')
log.setLevel(logging.WARNING)
from mock import Mock
import time
import unittest2 as unittest
testcase_lock = threading.Lock() # together with flush_console_streams, helps getting logging output related to the
# correct test in test runners such as the one in PyCharm IDE.
class logging_disabled(object):
    """Context manager that temporarily silences the 'output' logger.

    Nested uses are no-ops: only the outermost manager toggles the logger,
    and it re-enables propagation when it exits.

    USAGE:
        with logging_disabled():
            # do stuff
    """
    DISABLED = False

    def __init__(self):
        # Remember whether an outer context already disabled logging; if so,
        # this instance must leave everything untouched.
        self.nested = logging_disabled.DISABLED

    def __enter__(self):
        if self.nested:
            return
        logging_disabled.DISABLED = True
        logging.getLogger('output').propagate = False

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.nested:
            return
        logging_disabled.DISABLED = False
        logging.getLogger('output').propagate = True
def flush_console_streams():
    """Force stderr and stdout to flush so captured output lines up with tests."""
    for stream in (sys.stderr, sys.stdout):
        stream.flush()
class B3TestCase(unittest.TestCase):
    """Base test case: boots a FakeConsole wired with harmless stand-ins for
    screen/clock/cron so plugin tests run isolated and deterministically."""
    def setUp(self):
        # Serialise test cases so interleaved console logging stays
        # attributable to the right test (the lock is released in tearDown).
        testcase_lock.acquire()
        flush_console_streams()
        # create a FakeConsole parser
        self.parser_conf = XmlConfigParser()
        self.parser_conf.loadFromString(r"""<configuration/>""")
        with logging_disabled():
            from b3.fake import FakeConsole
            self.console = FakeConsole(self.parser_conf)
        self.console.screen = Mock()
        self.console.time = time.time
        self.console.upTime = Mock(return_value=3)  # pretend 3s of uptime
        self.console.cron.stop()
        def myError(msg, *args, **kwargs):
            # msg may carry %-placeholders that are filled from args after
            # the "ERROR: " prefix has been applied.
            print(("ERROR: %s" % msg) % args)
        self.console.error = myError
    def tearDown(self):
        flush_console_streams()
        testcase_lock.release()
"courgette@bigbrotherbot.net"
] | courgette@bigbrotherbot.net |
2e6152619157902ae4f10b42bf7c492e71fe6ebb | 449462388d7f0375c52009ec7420a6096a4b87f9 | /pynodes/serial_joy_to_servo.py | 8337f9eef3bfafc20330ae8ed5831ec48e4cba70 | [] | no_license | amlalejini/misc_arduino_ros_examples | 498a35a824e75bb63d73279a3765a2883056b935 | f5d4ceab10028bdbeff54b048c255ece42cf3255 | refs/heads/master | 2016-08-12T05:39:00.904919 | 2016-01-10T01:53:19 | 2016-01-10T01:53:19 | 49,342,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,204 | py | #!/usr/bin/python
import rospy, serial, atexit
from sensor_msgs.msg import Joy
'''
This node interfaces with an arduino programmed with the servo_control_example code.
Input is received from joy node.
Serial servo command packet format: label:value\n
'''
###################################
# Controller mappings in joy message
CONTROLLER_BUTTONS = {"A": 0, "B":1, "X": 2, "Y": 3, "R1": 5, "L1": 4, "BACK": 6, "START": 7} # TODO: FINISH THIS, USE BELOW
CONTROLLER_AXES = {"LSTICKV": 1, "LSTICKH": 0}
# Servo control: horizontal left-stick axis drives the servo.
JOY_SERVO_AXIS = CONTROLLER_AXES["LSTICKH"]
DFLT_JOY_AXIS_THRESH = 0.5  # stick deflection below this magnitude is ignored
# Below are default settings (overriden by param file)
# Servo Constants (angles presumably in degrees -- TODO confirm firmware units)
DFLT_MAX_SERVO_ANGLE = 150
DFLT_MIN_SERVO_ANGLE = 5
DFLT_SERVO_INC = 5
# Initial Angle
DFLT_INIT_SERVO_ANGLE = 90
# Joystick topic name
DFLT_JOY_TOPIC_NAME = "joy"
# Arduino Port
DFLT_ARDUINO_PORT = "/dev/ttyACM1"
# Arduino Baud
DFLT_ARDUINO_BAUD = 9600
###################################
class JoyToServo(object):
    """ROS node that turns joystick axis motion into incremental servo angle
    commands, written to an Arduino as 'label:value\\n' serial packets."""
    def __init__(self):
        # Initialize as ros node
        rospy.init_node("JoyToServo")
        # Initialize some variables; ROS params override the DFLT_* fallbacks.
        self.current_angle = rospy.get_param("servo/initial_angle", DFLT_INIT_SERVO_ANGLE)
        self.max_servo_angle = rospy.get_param("servo/max_angle", DFLT_MAX_SERVO_ANGLE)
        self.min_servo_angle = rospy.get_param("servo/min_angle", DFLT_MIN_SERVO_ANGLE)
        self.servo_increment = rospy.get_param("servo/increment", DFLT_SERVO_INC)
        self.joystick_topic = rospy.get_param("joystick/topic", DFLT_JOY_TOPIC_NAME)
        self.joy_axis_thresh = rospy.get_param("joystick/axis_thresh", DFLT_JOY_AXIS_THRESH)
        self.joy_received = False
        self.controller_state = Joy()
        self.arduino = None # This will keep our serial connection to the arduino
        self.arduino_port = rospy.get_param("arduino/port", DFLT_ARDUINO_PORT)
        self.arduino_baud = rospy.get_param("arduino/baud", DFLT_ARDUINO_BAUD)
        # Setup subscription to joystick topic
        rospy.Subscriber(self.joystick_topic, Joy, self.joy_callback)
        # Attempt to connect to arduino, retrying every 3s until ROS shutdown.
        while not rospy.is_shutdown():
            try:
                self.arduino = serial.Serial(self.arduino_port, self.arduino_baud, timeout = 1)
            except: # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
                rospy.logerr("Failed to connect to Arduino. Will continue trying.")
                rospy.sleep(3)
            else:
                rospy.loginfo("Connected to Arduino on port %s." % self.arduino_port)
                rospy.loginfo("Initializing servo angle to: %d" % self.current_angle)
                # Packet format: servo label 0, then the initial angle.
                self.arduino.write(str("0:%d\n" % self.current_angle))
                break
        # Register cleanup function to run on exit
        atexit.register(self._cleanup)
    def limit(self, value):
        '''
        This function clips the given value to max or min if > max or < min
        '''
        if value > self.max_servo_angle:
            return self.max_servo_angle
        elif value < self.min_servo_angle:
            return self.min_servo_angle
        else:
            return value
    def joy_callback(self, msg):
        '''
        Joy topic callback function (called anytime a message is sent over joy topic)
        '''
        # Only care about meaningful joy messages (axis pushed past threshold)
        if abs(msg.axes[JOY_SERVO_AXIS]) > self.joy_axis_thresh:
            self.controller_state = msg
            self.joy_received = True
    def run(self):
        '''
        Run function: process incoming messages (translate and send to arduino as servo commands)
        '''
        # Wait for a message to come over joystick topic before running
        rospy.wait_for_message(self.joystick_topic, Joy)
        # Set the rate this node will run at (running as fast as we can will kill ROS)
        rate = rospy.Rate(5)
        # Run!
        while not rospy.is_shutdown():
            if self.joy_received:
                # Grab most recent controller state
                current_state = self.controller_state
                cntrl_cmd = current_state.axes[JOY_SERVO_AXIS]
                # Calculate target angle from controller command
                # If negative (< -0.5), decrement current angle by 2
                targ_angle = self.current_angle
                if cntrl_cmd < -self.joy_axis_thresh:
                    targ_angle = self.limit(self.current_angle - self.servo_increment)
                elif cntrl_cmd > self.joy_axis_thresh:
                    targ_angle = self.limit(self.current_angle + self.servo_increment)
                else:
                    targ_angle = self.limit(self.current_angle)
                # If positive (> 0.5), increment current angle by 2
                # Otherwise, do nothing
                self.arduino.write("0:%d\n" % targ_angle)
                self.current_angle = targ_angle
                rospy.loginfo("Setting servo angle to %d" % self.current_angle)
                self.joy_received = False
            rate.sleep()
    def _cleanup(self):
        """Called at exit to close connection to Arduino"""
        self.arduino.close()
if __name__ == "__main__":
    # Node entry point: construct (connects to the Arduino) and spin.
    node = JoyToServo()
    node.run()
| [
"amlalejini@gmail.com"
] | amlalejini@gmail.com |
cee4566eaf5c52bb29eeb8f049801d51d3a0b241 | e82245a9e623ef3e2b4b9c02f0fd932c608c4484 | /f00b4r.w1thg00g13.com/07-lvl4-1 unsolved/exploration.py | 761bb51fd2d0e68a2b531fd1d395ef3b782162f0 | [] | no_license | Zylophone/Programming-for-Sport | 33e8161028cfddce3b7a1243eb092070107342e3 | 193d6184f939303d8661f68d6fd06bdec95df351 | refs/heads/master | 2020-06-16T23:11:44.719286 | 2017-05-21T17:10:46 | 2017-05-21T17:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,011 | py | n = 250
# Partition 1..4n by residue class mod 4: ones hold 1 (mod 4), twos 2,
# threes 3, and zeros the multiples of 4.
zeros, ones, twos, threes = [], [], [], []
for i in range(n):
    ones.append(4*i + 1)
    twos.append(4*i + 2)
    threes.append(4*i + 3)
    zeros.append(4*i + 4)
def inf_loop(pair):
pairs_observed = set()
return _inf_loop(pair, pairs_observed)
def _inf_loop(pair, pairs_observed):
a, b = pair
a, b = min(a,b), max(a,b)
if a == b:
return False
if (a, b) in pairs_observed:
return True
pairs_observed.add( (a,b) )
return _inf_loop( (2*a,b-a), pairs_observed )
def explore(group, group_str):
    '''
    For every unordered pair drawn from `group` (one of zeros/ones/twos/
    threes), classify the doubling map as looping forever or reaching
    equilibrium, and report pairs where the mod-4 prediction heuristic
    disagrees with the observed outcome.
    Takes zeros/ones/twos/threes as input.
    '''
    n = len(group)
    print "Which pair of {} lead to inf loop (desired) and lead to equil?".format(group_str)
    print ""
    print "finding answer for all pairs from {}".format(group)
    print ""
    # enumerate all unordered pairs (i < j)
    for i in range(n):
        for j in range(i+1, n):
            a, b = group[i], group[j]
            pair = (a, b)
            if inf_loop(pair):
                outp_str = "leads to inf loop"
                # loops never occur when a+b is a power of two
                assert not power_of_2(a+b)
                # assert predicting_inf_loop_based_on_group_comparison_after_reduction(a, b)
                correct = predicting_inf_loop_based_on_group_comparison_after_reduction(a, b)
                prediction_is_wrong = not correct
                if prediction_is_wrong:
                    reduced_pair = reduce(pair)
                    r_a, r_b = reduced_pair
                    print "prediction based on group comparison after reduction was wrong for pair ({}, {}) which reduced to {}. The groups of the reduced pair is ({}, {}) ".format(a, b, reduced_pair, group_of(r_a), group_of(r_b))
            else:
                outp_str = "leads to equil"
                # known reasons
                # - a+b is a power of 2
                # - 3*a == b as in (12, 36), therefore (a, b) = (a, 3a) -> (2a, 2a)
                if power_of_2(a+b) or 3*a == b or a == 3*b:
                    continue
                # only unexplained equilibria reach this point
                wrong = predicting_inf_loop_based_on_group_comparison_after_reduction(a, b)
                assert not wrong
                clarification_str = "FOR UNKNOWN REASON"
                print "the pair ({}, {}) {} + {}. Note, {} + {} = {}. Note, reduces to {}".format(
                    a,
                    b,
                    outp_str,
                    clarification_str,
                    a,
                    b,
                    a+b,
                    reduce((a,b))
                )
                #print ""
def predicting_inf_loop_based_on_group_comparison_after_reduction(a, b):
    """Predict an infinite loop from the mod-4 classes of the reduced pair.

    Pairs with a == b are excluded by the caller ("barring a == b" for the
    diagonal classes 1 and 3).
    """
    reduced_a, reduced_b = reduce((a, b))
    class_a = group_of(reduced_a)
    class_b = group_of(reduced_b)
    # For each class of the reduced a: the classes of the reduced b that are
    # expected to loop forever (group_of always yields 0..3, so the lookup
    # cannot miss).
    looping_partners = {
        0: (1, 2, 3),
        1: (0, 1, 2),
        2: (0, 1, 3),
        3: (0, 2, 3),
    }
    return class_b in looping_partners[class_a]
def group_of(a):
    """Return a's residue class modulo 4 (always one of 0, 1, 2, 3)."""
    _, remainder = divmod(a, 4)
    return remainder
def reduce(pair):
    """Sort the pair ascending and strip all common factors of 2.

    NOTE(review): shadows the builtin ``reduce``; kept because sibling
    functions in this module call it by this name.
    """
    a, b = pair
    a, b = min(a, b), max(a, b)
    # BUG FIX: use floor division so the values stay integers on Python 3;
    # the original '/=' relied on Python 2 integer division and would turn
    # the pair into floats (breaking the evenness test) under Python 3.
    while a % 2 == 0 and b % 2 == 0:
        a //= 2
        b //= 2
    return (a, b)
def power_of_2(x):
    """True iff x is a positive power of two (1, 2, 4, 8, ...)."""
    # x & (x - 1) clears the lowest set bit; exactly one set bit means a
    # power of two.  Zero is excluded explicitly, negatives never qualify.
    return x != 0 and x & (x - 1) == 0
explore(threes, "threes") | [
"jfv33@cornell.edu"
] | jfv33@cornell.edu |
fc369735b944f095d0fc4cbbf3541dab7e931089 | faf489dd4c2bd7cdf4f9e6a9cbfd6f0484fd873c | /posts/migrations/0001_initial.py | 911afc61a746f92d9a3b2f73986c9064559bdc7b | [] | no_license | liangsongyou/mb-app | cf262b0a472ee8d7ec838c92bde2672979e4cb84 | bca84e74bd99280a3d574f6cba15e7faf7930394 | refs/heads/master | 2020-04-11T07:28:24.844678 | 2018-12-13T09:08:32 | 2018-12-13T09:08:32 | 161,611,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # Generated by Django 2.1.4 on 2018-12-13 07:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the minimal Post model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
    ]
| [
"yuebei58@gmail.com"
] | yuebei58@gmail.com |
ee818a9519f21d4bcc71bda874a90330f47b83f3 | 61c1e9dd24c0564ff18d7ecab5bb12de95244c8c | /py_src/cmsproject/settings/production.py | 44be04ba67a6687d22b4bdcc9e86f70eadf98a36 | [] | no_license | divio/djangocms-project | 3512e24bec9a8582a10f5c321dc066104519a9d1 | 0d96af52d34eadc75ecc1e7188cd5748fde90cf6 | refs/heads/master | 2023-03-25T10:44:53.753082 | 2010-02-06T17:56:27 | 2010-02-06T17:56:27 | 504,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from cmsproject.settings.base import *
#CACHE_BACKEND = 'memcached://127.0.0.1:11211/?timeout=300&max_entries=300'
DATABASE_ENGINE = 'mysql'
DATABASE_NAME = 'cmsproject_live'
DATABASE_USER = 'root'
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
| [
"stefan.foulis@gmail.com"
] | stefan.foulis@gmail.com |
14fea32e585e79eff24b8aacecd8ad5193947a22 | 7e0ecd477d7fce0fb927a2d4870e805c181687d8 | /markov.py | b2e6693cc4dd049a0b3a493111edf2d38dddcbeb | [] | no_license | carly/Markov-Chains2 | 9b4e28a9b9c2382453ac98845aed4c58c2307ee9 | 0c4286539b8ca873b8f97026adadc3cfa22bbca5 | refs/heads/master | 2021-01-17T14:38:45.348305 | 2015-07-10T01:00:56 | 2015-07-10T01:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | import sys
from random import choice
class SimpleMarkovGenerator(object):
    """Builds a second-order Markov chain from two text files and emits
    random text bounded by a character limit."""

    @staticmethod
    def _read_corpus(path):
        """Return the file's words: newlines become spaces, then split on ' '."""
        # 'with' guarantees the handle is closed (the original leaked both
        # file objects); also removes the duplicated read logic.
        with open(path) as file_object:
            return file_object.read().replace("\n", " ").split(" ")

    def read_files(self, the_file1, the_file2):
        """Given a list of files, make chains from them."""
        self.corpus_text = self._read_corpus(the_file1) + self._read_corpus(the_file2)
        self.make_chains()

    def make_chains(self):
        """Map each (word_i, word_i+1) bigram to the list of following words."""
        chain_dict = {}
        for i in range(len(self.corpus_text) - 2):
            key = (self.corpus_text[i], self.corpus_text[i + 1])
            value = self.corpus_text[i + 2]
            chain_dict.setdefault(key, []).append(value)
        self.chains = chain_dict

    def make_text(self, limit=150):
        """Random-walk the chains; stop when no successor exists or adding
        another step would reach *limit* characters."""
        # list() so random.choice also works on Python 3 dict views
        # (behaviour under Python 2 is unchanged).
        random_key = choice(list(self.chains.keys()))
        random_val = choice(self.chains[random_key])
        first_phrase = [random_key[0], random_key[1], random_val]
        next_key = (first_phrase[-2], first_phrase[-1])
        while next_key in self.chains:
            # Look-ahead length check before committing another word.
            check_limit = " ".join(first_phrase + list(next_key))
            if len(check_limit) >= limit:
                break
            first_phrase.append(choice(self.chains[next_key]))
            next_key = (first_phrase[-2], first_phrase[-1])
        return " ".join(first_phrase)
if __name__ == "__main__":
    # CLI usage: python markov.py <file1> <file2>
    Test_Markov_Generator = SimpleMarkovGenerator()
    Test_Markov_Generator.read_files(sys.argv[1], sys.argv[2])
    sentence = Test_Markov_Generator.make_text()
    # NOTE(review): Python 2 print statement -- the rest of the file is 2/3
    # compatible; confirm the intended interpreter.
    print sentence
| [
"info@hackbrightacademy.com"
] | info@hackbrightacademy.com |
b2c67ba6d1cd48a3d2211295566172a3171a6d3a | 788db9a7ad4c6b17504e42506aed5fcad0fad082 | /src/silva/app/news/tests/test_news_item.py | 3faf4c5588e1767e3704c85a1eded9932da941d1 | [] | no_license | silvacms/silva.app.news | 16ca2b32e80a11f1cd5f2399abcb814596ed6c2b | 68593c75600de0fa73e23f2b4e4d4a0dafec469d | refs/heads/master | 2016-09-05T20:22:30.118393 | 2014-01-07T14:13:16 | 2014-01-07T14:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 Infrae. All rights reserved.
# See also LICENSE.txt
import unittest
from zope.interface.verify import verifyObject
from Products.Silva.ftesting import public_settings
from Products.Silva.testing import tests
from silva.core.interfaces import IPublicationWorkflow
from silva.app.news.interfaces import INewsItem, INewsItemVersion
from silva.app.news.testing import FunctionalLayer
class NewsItemTestCase(unittest.TestCase):
    """Test the NewsItem content type.
    """
    # Functional layer boots a full Silva application for each test.
    layer = FunctionalLayer
    def setUp(self):
        self.root = self.layer.get_application()
        self.layer.login('editor')
    def test_item(self):
        # Creating a news item fires ContentCreatedEvent and yields an object
        # (plus editable version) implementing the news item interfaces.
        factory = self.root.manage_addProduct['silva.app.news']
        with tests.assertTriggersEvents('ContentCreatedEvent'):
            factory.manage_addNewsItem('item', 'News Item')
        item = self.root._getOb('item', None)
        self.assertTrue(verifyObject(INewsItem, item))
        version = item.get_editable()
        self.assertTrue(verifyObject(INewsItemVersion, version))
    def test_rendering(self):
        # A published item must render publicly (HTTP 200) with its title.
        factory = self.root.manage_addProduct['silva.app.news']
        factory.manage_addNewsItem('item', 'News Item')
        IPublicationWorkflow(self.root.item).publish()
        with self.layer.get_browser(public_settings) as browser:
            self.assertEqual(browser.open('/root/item'), 200)
            self.assertEqual(browser.inspect.title, [u'News Item'])
def test_suite():
    """Aggregate this module's test cases for the Zope test runner."""
    return unittest.TestSuite([unittest.makeSuite(NewsItemTestCase)])
| [
"thefunny@gmail.com"
] | thefunny@gmail.com |
5dae99fc6af92568b0a1c6875e97bc3c8d56929a | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Binary Search/median_of_two_sorted_arrays.py | 72dd35967b478a4ed531e152475a1dd5d00d434f | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
if len(nums1) > len(nums2):
nums1, nums2 = nums2, nums1
x, y = len(nums1), len(nums2)
low, high = 0, x
half_len = (x + y + 1) // 2
is_even = ((x + y) % 2) == 0
while low <= high:
partition_x = (low + high) // 2
partition_y = half_len - partition_x
max_left_x = float("-inf") if partition_x == 0 else nums1[partition_x - 1]
min_right_x = float("inf") if partition_x == x else nums1[partition_x]
max_left_y = float("-inf") if partition_y == 0 else nums2[partition_y - 1]
min_right_y = float("inf") if partition_y == y else nums2[partition_y]
if max_left_x <= min_right_y and max_left_y <= min_right_x:
if is_even:
return (max(max_left_x, max_left_y) + min(min_right_x, min_right_y)) / 2
else:
return max(max_left_x, max_left_y)
elif max_left_x > min_right_y:
high = partition_x - 1
else:
low = partition_x + 1
| [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
38f0d76a284d07fb867464b3301d11b4a96466d9 | 66c6e36b3cbaaa459107555d3081798b9cf7a2f7 | /tests/profiling/sim_loop.py | adac5299d824f1cb2752614d133223c2ffad28d4 | [
"MIT"
] | permissive | QDevil/pygears | 6a68fc0cc52899573432082aaf5b9c0853f0b6f6 | a0b21d445e1d5c89ad66751447b8253536b835ee | refs/heads/master | 2023-08-25T18:41:44.583627 | 2021-03-25T14:37:06 | 2021-03-25T14:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import time
import cProfile, pstats, io
from pygears import gear
from pygears.typing import Uint
from pygears.lib.verif import drv
# from pygears.sim import sim
from pygears.sim.extens.sim_extend import SimExtend
from pygears.lib import shred
class Profiler(SimExtend):
    """Simulation extension that cProfiles the run and prints a
    time-sorted summary when the simulation finishes."""

    def before_run(self, sim):
        # Start collecting profiling data just before the simulation begins.
        profile = cProfile.Profile()
        profile.enable()
        self.pr = profile

    def after_run(self, sim):
        # Stop profiling, render a time-sorted report, persist the raw stats.
        self.pr.disable()
        report_buffer = io.StringIO()
        stats = pstats.Stats(self.pr, stream=report_buffer).sort_stats('time')
        stats.print_stats()
        stats.dump_stats('/tools/home/tmp/pygears.profile')
        print(report_buffer.getvalue())
@gear
async def passthrough(din: Uint[16]) -> Uint[16]:
    """Identity gear: forwards every 16-bit input token unchanged."""
    async with din as d:
        yield d
# d = drv(t=Uint[16], seq=list(range(4000)))
# for _ in range(20):
# d = d | passthrough
# d | shred
# t = time.time()
# # sim(extens=[Profiler])
# sim()
# print("%.3f" % (time.time()-t))
| [
"bogdan.vukobratovic@gmail.com"
] | bogdan.vukobratovic@gmail.com |
1be869fa6b09f3d9ab72d0db1f311ba2d6f9bf51 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /build/run_swarming_xcode_install.py | 1ed09263ff18e3307064dad18b376118ee47d89b | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 3,039 | py | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs swarming_xcode_install on the bots. It should be run when we
need to upgrade all the swarming testers. It:
1) Packages two python files into an isolate.
2) Runs the isolate on swarming machines that satisfy certain dimensions.
Example usage:
$ ./build/run_swarming_xcode_install.py --luci_path ~/work/luci-py \
--swarming-server touch-swarming.appspot.com \
--isolate-server touch-isolate.appspot.com
"""
from __future__ import print_function
import argparse
import os
import shutil
import subprocess
import sys
import tempfile
def main():
  """Package the installer scripts into an isolate and fan the task out to
  swarming bots; returns 0 on success (failing subprocesses raise)."""
  parser = argparse.ArgumentParser(
      description='Run swarming_xcode_install on the bots.')
  parser.add_argument('--luci_path', required=True, type=os.path.abspath)
  parser.add_argument('--swarming-server', required=True, type=str)
  parser.add_argument('--isolate-server', required=True, type=str)
  parser.add_argument('--batches', type=int, default=25,
                      help="Run xcode install in batches of size |batches|.")
  parser.add_argument('--dimension', nargs=2, action='append')
  args = parser.parse_args()
  args.dimension = args.dimension or []
  script_dir = os.path.dirname(os.path.abspath(__file__))
  tmp_dir = tempfile.mkdtemp(prefix='swarming_xcode')
  try:
    print('Making isolate.')
    # Stage only the two payload scripts so nothing else gets archived.
    shutil.copyfile(os.path.join(script_dir, 'swarming_xcode_install.py'),
                    os.path.join(tmp_dir, 'swarming_xcode_install.py'))
    shutil.copyfile(os.path.join(script_dir, 'mac_toolchain.py'),
                    os.path.join(tmp_dir, 'mac_toolchain.py'))
    luci_client = os.path.join(args.luci_path, 'client')
    cmd = [
      sys.executable, os.path.join(luci_client, 'isolateserver.py'), 'archive',
      '-I', args.isolate_server, tmp_dir,
    ]
    # First whitespace-separated token of the archive output is the hash.
    isolate_hash = subprocess.check_output(cmd).split()[0]
    print('Running swarming_xcode_install.')
    # TODO(crbug.com/765361): The dimensions below should be updated once
    # swarming for iOS is fleshed out, likely removing xcode_version 9 and
    # adding different dimensions.
    luci_tools = os.path.join(luci_client, 'tools')
    dimensions = [['pool', 'Chrome'], ['xcode_version', '9.0']] + args.dimension
    dim_args = []
    for d in dimensions:
      dim_args += ['--dimension'] + d
    cmd = [
      sys.executable, os.path.join(luci_tools, 'run_on_bots.py'),
      '--swarming', args.swarming_server, '--isolate-server',
      args.isolate_server, '--priority', '20', '--batches', str(args.batches),
      '--tags', 'name:run_swarming_xcode_install',
    ] + dim_args + ['--name', 'run_swarming_xcode_install', '--', isolate_hash,
                    'python', 'swarming_xcode_install.py',
    ]
    subprocess.check_call(cmd)
    print('All tasks completed.')
  finally:
    # Always remove the staging dir, even if archiving or dispatch failed.
    shutil.rmtree(tmp_dir)
  return 0
if __name__ == '__main__':
  # Propagate main()'s return code to the shell.
  sys.exit(main())
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
a313c2175bade44a9f2c9642aa13988f52b4a8fe | 3b8387d770b33850dca55a1f0657167906697b5b | /Factor_Combinations.py | 113656a0a6e3478216de6109ccfc39794fd585e3 | [] | no_license | BigZihao/Leetcode | fe2795d5485e4780c1ec79558eaf9017a830a516 | 95ec42c241a4815a8b35f7a71948f1bc4e58b5b3 | refs/heads/master | 2021-01-13T00:59:12.489768 | 2018-06-21T21:50:33 | 2018-06-21T21:50:33 | 48,407,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | class Solution(object):
    def getFactors(self, n):
        """
        Return all factorizations of n into factors >= 2 (excluding the
        trivial [n] itself), found by float-based DFS over candidates.
        :type n: int
        :rtype: List[List[int]]
        NOTE(review): a second `getFactors` defined later in this class
        silently replaces this implementation.
        """
        res = []
        # candidate factors 2 .. n//2
        nums = [i for i in range(2, n//2+1)]
        self.dfs(nums, 0, float(n), [], res)
        if res == [[]]:
            res = []
        return res
    def dfs(self, nums, index, target, path, res):
        """Divide `target` by candidates from `index` on, recording `path`
        whenever the running quotient reaches exactly 1.0.

        NOTE(review): relies on exact float comparisons (target == 1.0,
        int(target) != target); safe for the small integers involved but
        fragile in general.
        """
        if target == 1.0:
            res.append(path)
        # stop once the quotient fell below 1 or is no longer whole
        if target < 1 or int(target)!=target:
            return
        for i in range(index, len(nums)):
            self.dfs(nums, i, float(target/nums[i]), path + [nums[i]], res)
    ## iterative backtracking -- NOTE(review): redefines getFactors and
    ## shadows the DFS version above; also relies on Python 2 integer
    ## division (n/x), so results become floats under Python 3.
    def getFactors(self, n):
        ans, stack, x = [], [], 2
        while True:
            if x > n/x:
                # no factor <= sqrt(n) remains: emit partial product + n,
                # then backtrack to the last pushed factor.
                if not stack:
                    return ans
                ans.append(stack + [n])
                x = stack.pop()
                n*=x
                x+=1
            elif n%x == 0:
                stack.append(x)
                n/=x
            else:
                x+=1
# Iterative:
# NOTE(review): the bare labels "Iterative:"/"Recursive:" in the original
# were syntax errors (pasted notes); they are kept here as comments.  Both
# functions are top-level rewrites of Solution.getFactors and keep its
# (self, n) shape, so the second definition shadows the first.  '//'
# replaces '/' so the results stay integers on Python 3 (Python 2 integer
# division behaviour is preserved).
def getFactors(self, n):
    # Stack-driven breadth over (remaining value, smallest factor, prefix).
    todo, combis = [(n, 2, [])], []
    while todo:
        n, i, combi = todo.pop()
        while i * i <= n:
            if n % i == 0:
                combis += combi + [i, n//i],
                todo += (n//i, i, combi+[i]),
            i += 1
    return combis
# Recursive:
def getFactors(self, n):
    # Same search expressed with an inner recursive helper.
    def factor(n, i, combi, combis):
        while i * i <= n:
            if n % i == 0:
                combis += combi + [i, n//i],
                factor(n//i, i, combi+[i], combis)
            i += 1
        return combis
    return factor(n, 2, [], [])
"zihao.zhang.ustb@gmail.com"
] | zihao.zhang.ustb@gmail.com |
04933053c52cb7e8eda097638de38731cb3f1993 | 0ceae32211da4de1e608bb6269d2e0d2962aeb70 | /.history/mysite/settings_20201102005220.py | 666127312a85102fc941b14a320c93354686908e | [] | no_license | FerValenzuela-ops/Prueba2 | ae5772ed7fde2ce2ae47af225d52d014c2c49c73 | e691341651e2674fb49d43dea9cb64f132708edc | refs/heads/main | 2023-01-13T06:50:36.614129 | 2020-11-20T22:13:47 | 2020-11-20T22:13:47 | 309,217,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,618 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.16.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j!dz+qfw0^9u6!@)gkg8(h1ep19(060*z(54q@wa3l97+d0tbh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'apppersona.apps.ApppersonaConfig', #Agregando la nueva aplicacion
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static/')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
# Where users land after login/logout.
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'

# User-uploaded files are stored under <project>/media ...
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# ... and served from the /media/ URL prefix.
# BUG FIX: this was 'media\apppersona', which is not a URL prefix at all and
# whose '\a' escape silently embedded a BEL control character in the string.
MEDIA_URL = '/media/'

CRISPY_TEMPLATE_PACK = "bootstrap4"

# Development-only e-mail backend: prints outgoing mail to the console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"fernandox_240997@live.com"
] | fernandox_240997@live.com |
962a8952ba7841befd146ff48851cc417327d007 | dce4a52986ddccea91fbf937bd89e0ae00b9d046 | /jni-build/jni-build/jni/include/tensorflow/python/framework/proto_test.py | 8927226525952007c0ba44f63b86db7cf37ab223 | [
"MIT"
] | permissive | Lab603/PicEncyclopedias | 54a641b106b7bb2d2f71b2dacef1e5dbeaf773a6 | 6d39eeb66c63a6f0f7895befc588c9eb1dd105f9 | refs/heads/master | 2022-11-11T13:35:32.781340 | 2018-03-15T05:53:07 | 2018-03-15T05:53:07 | 103,941,664 | 6 | 3 | MIT | 2022-10-28T05:31:37 | 2017-09-18T13:20:47 | C++ | UTF-8 | Python | false | false | 1,582 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Protobuf related tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class ProtoTest(tf.test.TestCase):
    """Checks that very large GraphDef protobufs round-trip through
    serialization (protobuf python must handle protos > 64MB)."""

    # TODO(vrv): re-enable this test once we figure out how this can
    # pass the pip install test (where the user is expected to have
    # protobuf installed).
    def _testLargeProto(self):
        # Leading underscore keeps the runner from collecting this case
        # (see TODO above); rename to testLargeProto to re-enable.
        # create a constant of size > 64MB.
        a = tf.constant(np.zeros([1024, 1024, 17]))
        # Serialize the resulting graph def.
        gdef = a.op.graph.as_graph_def()
        serialized = gdef.SerializeToString()
        unserialized = tf.Graph().as_graph_def()
        # Deserialize back. Protobuf python library should support
        # protos larger than 64MB.
        unserialized.ParseFromString(serialized)
        self.assertProtoEquals(unserialized, gdef)
if __name__ == "__main__":
tf.test.main()
| [
"super_mr.z@hotmail.comm"
] | super_mr.z@hotmail.comm |
22fca0ef459822f0b7eae711b110144809c43020 | 01301e5f486883865e3696f38ef913a232958343 | /antlir/website/gen/bzldoc.py | ff393ff286b32e61f2effacc831ea23092a6c786 | [
"MIT"
] | permissive | SaurabhAgarwala/antlir | 85fb09c87dafde56622b4107224b41f873f66442 | d9513d35d3eaa9d28717a40057a14d099c6ec775 | refs/heads/main | 2023-06-25T09:05:30.619684 | 2021-07-01T23:04:57 | 2021-07-01T23:06:11 | 382,355,446 | 0 | 0 | MIT | 2021-07-02T13:30:39 | 2021-07-02T13:30:39 | null | UTF-8 | Python | false | false | 7,319 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
bzldoc.py is a simple documentation extractor that parses docstrings out of
.bzl files and converts them to .md
bzldoc will look for a top-level struct that serves as the public API of the
.bzl file and expose that accordingly. If not present, "public" functions
(without a leading _) are documented in the output file.
There is currently no additional parsing done on the docstrings themselves
(for example, to highlight function arguments).
"""
import argparse
import ast
import os
from dataclasses import dataclass
from typing import Iterable, Mapping, Optional
from antlir.artifacts_dir import find_buck_cell_root
from antlir.common import get_logger
from antlir.fs_utils import Path
log = get_logger()
# Global mapping to track all the parse modules to resolve references between
# files, since antlir apis heavily employ redirection in API exports.
all_modules: Mapping[Path, "BzlFile"] = {}
@dataclass(frozen=True)
class BzlFile(object):
    """A parsed .bzl file: its repo-relative path plus the Python AST of its
    contents.  Instances are registered in the module-global `all_modules`
    map so that symbols re-exported via load() can be resolved across files.
    """

    # Repo-relative path without the .bzl suffix (antlir fs_utils.Path --
    # bytes-based, hence the .decode() calls below).
    path: Path
    module: ast.Module

    @property
    def name(self) -> str:
        """Module name, i.e. the path's final component."""
        return self.path.basename().decode()

    @property
    def docblock(self) -> Optional[str]:
        """The module-level docstring, if any."""
        return ast.get_docstring(self.module)

    @property
    def body(self) -> Iterable[ast.AST]:
        return self.module.body

    @property
    def export_struct(self) -> Optional[Mapping[str, ast.AST]]:
        """Look for a struct that exports the 'public api' of this module"""
        assignments = [
            node for node in self.body if isinstance(node, ast.Assign)
        ]
        # typically this is at the end of the module, so iterate backwards
        for e in reversed(assignments):
            # For easy matching, it is assumed that the name of the struct
            # matches the module name
            # pyre-fixme[16]: `expr` has no attribute `id`.
            if len(e.targets) == 1 and e.targets[0].id == self.name:
                # pyre-fixme[16]: `expr` has no attribute `keywords`.
                return {kw.arg: kw.value for kw in e.value.keywords}
        return None

    @property
    def functions(self) -> Mapping[str, ast.FunctionDef]:
        """Top-level function definitions, keyed by name."""
        return {
            node.name: node
            for node in self.body
            if isinstance(node, ast.FunctionDef)
        }

    @property
    def loaded_symbols(self) -> Mapping[str, Path]:
        """Returns map of symbol -> source file target"""
        # Collect every top-level load("//path:file.bzl", "sym", ...) call.
        loads = [
            node.value
            for node in self.body
            if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call)
            # pyre-fixme[16]: `expr` has no attribute `func`.
            and isinstance(node.value.func, ast.Name)
            and node.value.func.id == "load"
        ]
        symbols = {}
        for load in loads:
            # pyre-fixme[16]: `expr` has no attribute `args`.
            file = load.args[0].s.lstrip("/").encode()
            if file.startswith(b":"):
                # Relative load target: resolve against this file's directory.
                file = self.path.dirname() / file.lstrip(b":")
            # Turn "pkg:file.bzl" into the path key "pkg/file" (strip ".bzl").
            file = Path(file.replace(b":", b"/")[:-4])
            file_symbols = [a.s for a in load.args[1:]]
            for s in file_symbols:
                symbols[s] = file
        return symbols

    def resolve_function(self, name: str) -> Optional[ast.FunctionDef]:
        """
        Attempt to resolve the given function name, traversing load()
        calls if it is not defined locally.
        """
        f = self.functions.get(name, None)
        if f:
            return f
        src = self.loaded_symbols.get(name, None)
        if src:
            if src not in all_modules:
                # Source file was not among the bzls given on the command
                # line, so we cannot follow the reference.
                log.warning(
                    f"{name} is loaded from {src}, which was not parsed"
                )
                return None
            # pyre-fixme[6]: Expected `Path` for 1st param but got `str`.
            return all_modules[src].resolve_function(name)
        log.warning(f"{self.path}: '{name}' not defined locally or loaded")
        return None

    @property
    def header(self) -> str:
        """Docusaurus front-matter for the generated .md page.  The '@' and
        'generated' pieces are concatenated so this source file itself does
        not contain the literal generated-file marker."""
        return (
            f"""---
id: {self.path.basename().decode()}
title: {self.path.basename().decode().capitalize()}
generated: """
            + "'@"
            + "generated'"
            + "\n---\n"
        )

    def generate_md(self) -> Optional[str]:
        """
        Generate a .md doc describing the exported API of this module, or
        None if there is no export struct.
        This MUST be called after parsing every module, since it does
        cross-module docstring resolution.
        """
        if not self.export_struct:
            log.warning(f"{self.path}: missing export struct, not documenting")
            return None
        md = self.header
        md += self.docblock or ""
        md += "\n\n"
        md += "API\n===\n"
        # pyre-fixme[16]: `Optional` has no attribute `items`.
        for name, node in self.export_struct.items():
            if not isinstance(node, ast.Name):
                # Only plain name references can be resolved to a function.
                log.warning(f"not documenting non-name '{name}: {node}'")
                continue
            func = self.resolve_function(node.id)
            if not func:
                log.warning(f"not documenting unresolved func '{name}'")
                continue
            # Build a human-readable prototype: positional args plus any
            # *varargs / **kwargs.
            args = [a.arg for a in func.args.args]
            if func.args.vararg:
                # pyre-fixme[16]: `Optional` has no attribute `arg`.
                args.append("*" + func.args.vararg.arg)
            if func.args.kwarg:
                args.append("**" + func.args.kwarg.arg)
            args = ", ".join(args)
            md += f"`{name}`\n---\n"
            md += f"Prototype: `{name}({args})`\n\n"
            md += ast.get_docstring(func) or "No docstring available.\n"
            md += "\n\n"
        return md
def bzldoc():
    """CLI entry point: parse every given .bzl file, then emit one generated
    .md page per module that exposes an export struct.

    Usage: bzldoc.py BZL [BZL ...] OUTDIR
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("bzls", type=Path.from_argparse, nargs="+")
    parser.add_argument("outdir", type=Path.from_argparse)
    args = parser.parse_args()
    bzls = args.bzls
    outdir = args.outdir
    os.makedirs(outdir, exist_ok=True)
    repo_root = find_buck_cell_root()
    # Pass 1: parse everything first, so cross-module load() references can
    # be resolved during generation (see BzlFile.generate_md).
    for bzl in bzls:
        # always deal with relative paths from repo root
        parsed = ast.parse(bzl.read_text())
        bzl = bzl.abspath().relpath(repo_root)
        assert bzl.endswith(b".bzl")
        module_path = Path(bzl[:-4])
        module = BzlFile(module_path, parsed)
        all_modules[module_path] = module
    # Pass 2: generate the docs.
    for mod in all_modules.values():
        md = mod.generate_md()
        if not md:
            continue
        # Mirror the antlir/bzl directory layout under outdir.
        dstdir = outdir / mod.path.relpath("antlir/bzl").dirname()
        dst = dstdir / f"gen-{mod.path.basename()}.md"
        if not dstdir.exists():
            os.makedirs(dstdir, exist_ok=True)
        # avoid rewriting the file if the contents are the same to avoid
        # endlessly recompiling in `yarn watch`
        if dst.exists() and dst.read_text() == md:
            log.debug(f"{dst} is unchanged")
        else:
            log.info(f"updating generated docs {dst}")
            with open(dst, "w") as out:
                out.write(md)
if __name__ == "__main__":
bzldoc()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
30d062eb92268c2c0fb2038659b8c53a27bb402e | 823b75a3a6a99870a19d666cdad829763077e532 | /16_fastx_grep/tests/fastx_grep_test.py | d33f976fa4c3332c7dd0e6396da09d48f07628ad | [
"MIT"
] | permissive | christophergaughan/biofx_python | acf79d39bb111781980929dbe51f6c3b230618d5 | b0fe2363ac08e9287bf8648cc41b1f7f0b518383 | refs/heads/main | 2023-03-01T06:24:53.968445 | 2021-02-11T17:16:34 | 2021-02-11T17:16:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,297 | py | """ Tests for fastx_grep.py """
import os
import platform
import random
import re
import string
from subprocess import getstatusoutput
from typing import List, Optional
PRG = './fastx_grep.py'
RUN = f'python {PRG}' if platform.system() == 'Windows' else PRG
EMPTY = './tests/inputs/empty.fa'
LSU = './tests/inputs/lsu.fq'
LSU_FA = './tests/inputs/lsu.fa'
BAD_EXT = './tests/inputs/lsu.fx'
# --------------------------------------------------
def test_exists() -> None:
    """The program under test is present on disk."""
    assert os.path.isfile(PRG)
# --------------------------------------------------
def test_usage() -> None:
    """Both help flags exit 0 and print a usage message."""
    for flag in ('-h', '--help'):
        exit_code, output = getstatusoutput(f'{RUN} {flag}')
        assert exit_code == 0
        assert output.lower().startswith('usage:')
# --------------------------------------------------
def test_bad_file() -> None:
    """A nonexistent input file yields a usage error naming the file."""
    pattern = random_string()
    bad = random_string()
    exit_code, output = getstatusoutput(f'{RUN} {pattern} {bad}')
    assert exit_code != 0
    assert output.lower().startswith('usage:')
    assert re.search(f"No such file or directory: '{bad}'", output)
# --------------------------------------------------
def test_cannot_guess() -> None:
    """ Dies on unguessable extension """
    # A file with an unknown extension (.fx) cannot be dispatched to a
    # FASTA/FASTQ parser, so the program must ask for an explicit format.
    pattern = random_string()
    rv, out = getstatusoutput(f'{RUN} {pattern} {BAD_EXT}')
    assert rv != 0
    assert re.search(f'Please specify file format for "{BAD_EXT}"', out)
# --------------------------------------------------
def test_out_file() -> None:
    """ out_file """
    # Use a random output filename and randomly pick the short or long flag
    # so both spellings get exercised across runs.
    out_file = random_string()
    if os.path.isfile(out_file):
        os.remove(out_file)
    try:
        flag = '-o' if random.choice([0, 1]) else '--outfile'
        rv, out = getstatusoutput(f'{RUN} {flag} {out_file} LSU {LSU}')
        assert rv == 0
        assert os.path.isfile(out_file)
        expected = open(LSU + '.upper.out').read().rstrip()
        assert open(out_file).read().rstrip() == expected
    finally:
        # Always clean up the generated file, pass or fail.
        if os.path.isfile(out_file):
            os.remove(out_file)
# --------------------------------------------------
def run(pattern: str,
        input_file: str,
        expected_file: str,
        opts: Optional[List[str]] = None) -> None:
    """Run the program with `pattern` and extra `opts` against `input_file`
    and assert that the produced output file matches `expected_file`.

    The output file gets a random name and is removed afterwards.
    """
    # FIX: the default used to be a shared mutable `[]`; use None-sentinel.
    opts = [] if opts is None else opts
    assert os.path.isfile(expected_file)
    expected = open(expected_file).read().rstrip()
    out_file = random_string()
    if os.path.isfile(out_file):
        os.remove(out_file)
    try:
        cmd = f"{RUN} {' '.join(opts)} {pattern} -o {out_file} {input_file}"
        rv, out = getstatusoutput(cmd)
        assert os.path.isfile(out_file)
        assert rv == 0
        assert open(out_file).read().rstrip() == expected
    finally:
        # Always clean up the generated file, pass or fail.
        if os.path.isfile(out_file):
            os.remove(out_file)
# --------------------------------------------------
def test_empty_file() -> None:
    """ Handles empty file """
    # Any pattern against an empty FASTA must yield the (empty) expected out.
    pattern = random_string()
    run(pattern, EMPTY, EMPTY + '.out')
# --------------------------------------------------
def test_lsu_uppercase() -> None:
    """Case-sensitive search for the uppercase pattern."""
    run('LSU', LSU, LSU + '.upper.out')
# --------------------------------------------------
def test_lsu_lowercase() -> None:
    """Case-sensitive search for the lowercase pattern."""
    run('lsu', LSU, LSU + '.lower.out')
# --------------------------------------------------
def test_lsu_uppercase_insensitive() -> None:
    """Case-insensitive search (-i) with an uppercase pattern."""
    run('LSU', LSU, LSU + '.i.upper.out', ['-i'])
# --------------------------------------------------
def test_lsu_lowercase_insensitive() -> None:
    """Case-insensitive search (--insensitive) with a lowercase pattern."""
    run('lsu', LSU, LSU + '.i.lower.out', ['--insensitive'])
# --------------------------------------------------
def test_outfmt_fastq_to_fasta() -> None:
    """ outfmt """
    # Randomly exercise both spellings of the output-format flag.
    flag = '-O' if random.choice([0, 1]) else '--outfmt'
    run('LSU', LSU, LSU + '.fa.out', [f'{flag} fasta'])
# --------------------------------------------------
def test_outfmt_fastq_to_fasta2line() -> None:
    """ outfmt """
    # Randomly exercise both spellings of the output-format flag.
    flag = '-O' if random.choice([0, 1]) else '--outfmt'
    run('LSU', LSU, LSU + '.2fa.out', [f'{flag} fasta-2line'])
# --------------------------------------------------
def random_string() -> str:
    """Return a random alphanumeric string of length 5-10."""
    alphabet = string.ascii_letters + string.digits
    length = random.randint(5, 10)
    return ''.join(random.choices(alphabet, k=length))
| [
"kyclark@gmail.com"
] | kyclark@gmail.com |
9ec902f3250fb4d54eabb913287fd11ed726e518 | 3fb279aebc3dabdbf23b215033166880b2deb11a | /core/migrations/0001_initial.py | 9f8a4bc82a9f2110e572aa844bb89825974085f4 | [] | no_license | kurisukri/CousinsDjango | 8139ad692ef0d8e3b871686e8d01074e0682d085 | 893b51f1f14652cfd0bed7e8a94ce8d0d1869f70 | refs/heads/master | 2023-06-19T10:15:14.346909 | 2021-07-09T00:31:00 | 2021-07-09T00:31:00 | 384,280,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | # Generated by Django 3.2.5 on 2021-07-08 12:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the `core` app: creates the TipoRopa and
    # DescripcionRopa tables.  (Auto-generated by `makemigrations`;
    # edit with care.)

    initial = True

    # No dependencies: this is the app's first migration.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TipoRopa',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ropa', models.CharField(max_length=80)),
            ],
        ),
        migrations.CreateModel(
            name='DescripcionRopa',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=100)),
                ('valor', models.CharField(max_length=50)),
                ('talla', models.IntegerField()),
                # FK to TipoRopa; deleting a TipoRopa cascades to its rows.
                ('genero', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.tiporopa')),
            ],
        ),
    ]
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
a7ff4cb08c7a67ed1ddb1ef59c1823983a2d871e | 34b9b39442bde1a3c8fa670ef60bcc84d772a067 | /Assignment 8 Pandas C Deadline Nov 28th 2017/assignment8_riva/assignment8_riva.py | 4be45bfb385dbb9f1ec5c84747421e2afc5cee58 | [] | no_license | bnajafi/Scientific_Python_Assignments_POLIMI_EETBS | b398fc2754b843d63cd06d517235c16177a87dcf | 8da926e995dcaf02a297c6bb2f3120c49d6d63da | refs/heads/master | 2021-05-07T22:36:14.715936 | 2018-01-16T21:12:33 | 2018-01-16T21:12:33 | 107,265,075 | 38 | 86 | null | 2018-01-16T21:12:34 | 2017-10-17T12:24:04 | Python | UTF-8 | Python | false | false | 1,285 | py | #ASSIGNMENT 8
import os
import numpy as np
import pandas as pd
import scipy as sp
os.chdir("/Users/Fede/Desktop/RLF Method")
Windows_DF=pd.read_csv("windows.csv",sep=";",index_col=0)
def Irrad(L, D):
    """Total (beam + diffuse) irradiance for latitude L and direction D.

    Parameters
    ----------
    L : float
        Latitude in degrees; interpolated between the tabulated latitude
        columns of the CSV tables.
    D : str
        Row label (exposure direction, e.g. "N", "S", ...) present in both
        CSV files.

    Returns
    -------
    float
        Beam + diffuse irradiance (PXI) at latitude L for direction D.

    Notes
    -----
    Reads "BeamIrradiance.csv" and "DiffuseIrradiance.csv" from the current
    working directory on every call (the caller script does os.chdir first).
    """
    beam = pd.read_csv("BeamIrradiance.csv", sep=";", index_col=0)
    diffuse = pd.read_csv("DiffuseIrradiance.csv", sep=";", index_col=0)
    # Column headers are latitudes stored as strings; convert them once into
    # the numeric x-grid for interpolation.  (Replaces the removed pandas
    # API Index.get_values().)  np.interp assumes the grid is increasing --
    # TODO confirm the CSV columns are sorted ascending.
    beam_lats = np.asarray(beam.columns, dtype=np.int32)
    diffuse_lats = np.asarray(diffuse.columns, dtype=np.int32)
    # FIX: scipy.interp was a deprecated alias of numpy.interp and has been
    # removed from modern SciPy; call NumPy directly.
    beam_val = np.interp(L, beam_lats, beam.loc[D])
    diffuse_val = np.interp(L, diffuse_lats, diffuse.loc[D])
    return beam_val + diffuse_val
#Piacenza_case
# NOTE: this script uses the Python 2 print statement.
latitude_location = 45
PXI_values=[]
# Compute the irradiance (PXI) for every window row, using each window's
# facing direction from the table.
for index in Windows_DF.index.tolist():
    print index
    PXI_values = np.append(PXI_values,Irrad(latitude_location,Windows_DF["Direction"][index]))
print PXI_values
# Persist the enriched table both as CSV and as an HTML report.
Windows_DF["PXI"] = PXI_values
Windows_DF.to_csv("windows_completed_withPXI.csv",sep=";")
Windows_DF.to_html("Window_Completed_withPXI.html")
| [
"b.najafi87@gmail.com"
] | b.najafi87@gmail.com |
d0c6abf00359ddc3a1523cb657b49938f8600ec9 | 759653bf8bd290e023d8f71a0cd5faa95c1687b0 | /code/682.py | 000c05ad032cd2c7fa334010a64023bc87993306 | [] | no_license | RuidongZ/LeetCode | 6032fc02d3f996155c4f6965f2ad2fc48de6c3c2 | ef8f9edd7857f4ef103924e21224dcd878c87196 | refs/heads/master | 2022-02-27T12:32:00.261851 | 2019-10-17T08:54:34 | 2019-10-17T08:54:34 | 115,314,228 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | # -*- Encoding:UTF-8 -*-
# 682. Baseball Game
# You're now a baseball game point recorder.
# Given a list of strings, each string can be one of the 4 following types:
#
# Integer (one round's score): Directly represents the number of points you get in this round.
# "+" (one round's score): Represents that the points
# you get in this round are the sum of the last two valid round's points.
# "D" (one round's score): Represents that the points
# you get in this round are the doubled data of the last valid round's points.
# "C" (an operation, which isn't a round's score): Represents the last valid round's points
# you get were invalid and should be removed.
# Each round's operation is permanent and could have an impact on the round before and the round after.
#
# You need to return the sum of the points you could get in all the rounds.
#
# Example 1:
# Input: ["5","2","C","D","+"]
# Output: 30
# Explanation:
# Round 1: You could get 5 points. The sum is: 5.
# Round 2: You could get 2 points. The sum is: 7.
# Operation 1: The round 2's data was invalid. The sum is: 5.
# Round 3: You could get 10 points (the round 2's data has been removed). The sum is: 15.
# Round 4: You could get 5 + 10 = 15 points. The sum is: 30.
# Example 2:
# Input: ["5","-2","4","C","D","9","+","+"]
# Output: 27
# Explanation:
# Round 1: You could get 5 points. The sum is: 5.
# Round 2: You could get -2 points. The sum is: 3.
# Round 3: You could get 4 points. The sum is: 7.
# Operation 1: The round 3's data is invalid. The sum is: 3.
# Round 4: You could get -4 points (the round 3's data has been removed). The sum is: -1.
# Round 5: You could get 9 points. The sum is: 8.
# Round 6: You could get -4 + 9 = 5 points. The sum is 13.
# Round 7: You could get 9 + 5 = 14 points. The sum is 27.
# Note:
# The size of the input list will be between 1 and 1000.
# Every integer represented in the list will be between -30000 and 30000.
class Solution(object):
    """Baseball point recorder (LeetCode 682)."""

    def calPoints(self, ops):
        """Return the sum of all valid round scores.

        :type ops: List[str]
        :rtype: int
        """
        scores = []
        for token in ops:
            if token == "C":
                # Invalidate (remove) the previous valid round.
                scores.pop()
            elif token == "D":
                # Double the previous valid round's points.
                scores.append(2 * scores[-1])
            elif token == "+":
                # Sum of the last two valid rounds.
                scores.append(scores[-1] + scores[-2])
            else:
                # A literal (possibly negative) integer score.
                scores.append(int(token))
        return sum(scores)
| [
"459597855@qq.com"
] | 459597855@qq.com |
90b5d74c4c0bbbf6e97d5ec51a0bc047fac45332 | 45870a80cbe343efe95eb9e8d0bd47c8c88353d1 | /特殊的函数/venv/Lib/site-packages/tensorflow/tools/api/generator/api/sets/__init__.py | e3bd9fc0fa9b918b422364fe9ce245c7e279815e | [] | no_license | pippichi/IntelliJ_PYTHON | 3af7fbb2c8a3c2ff4c44e66736bbfb7aed51fe88 | 0bc6ded6fb5b5d9450920e4ed5e90a2b82eae7ca | refs/heads/master | 2021-07-10T09:53:01.264372 | 2020-07-09T13:19:41 | 2020-07-09T13:19:41 | 159,319,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | """Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.ops.sets import set_difference
from tensorflow.python.ops.sets import set_intersection
from tensorflow.python.ops.sets import set_size
from tensorflow.python.ops.sets import set_union | [
"874496049@qq.com"
] | 874496049@qq.com |
4890abc714e53efa4830665516af7572c922471a | 54ab0f79f5d68f4732ca7d205f72ecef99862303 | /torch/onnx/symbolic_opset16.py | 309309771df8a9f89de8a863ec1de008068633ac | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | csarofeen/pytorch | a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc | e8557ec5e064608577f81e51ccfe7c36c917cb0f | refs/heads/devel | 2023-04-30T02:42:13.558738 | 2023-03-14T00:50:01 | 2023-03-14T00:50:01 | 88,071,101 | 35 | 10 | NOASSERTION | 2023-06-21T17:37:30 | 2017-04-12T16:02:31 | C++ | UTF-8 | Python | false | false | 3,721 | py | """This file exports ONNX ops for opset 16.
Note [ONNX Operators that are added/updated in opset 16]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
https://github.com/onnx/onnx/blob/main/docs/Changelog.md#version-16-of-the-default-onnx-operator-set
New operators:
GridSample https://github.com/onnx/onnx/pull/3557
Updated operators:
Identity
If
LeakyRelu
Loop
PRelu
RoiAlign
Scan
ScatterElements
ScatterND
Where
GreaterOrEqual
LessOrEqual
"""
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in README.md
import functools
import torch
from torch.nn.functional import (
GRID_SAMPLE_INTERPOLATION_MODES,
GRID_SAMPLE_PADDING_MODES,
)
from torch.onnx import _type_utils, symbolic_helper
from torch.onnx._internal import _beartype, jit_utils, registration
_onnx_symbolic = functools.partial(registration.onnx_symbolic, opset=16)
# note (mkozuki): Why `grid_sampler` instead of `grid_sample`?
# Because `torch.nn.functional.grid_sample` calls `torch.grid_sampler`.
@_onnx_symbolic("aten::grid_sampler")
@symbolic_helper.parse_args("v", "v", "i", "i", "b")
@_beartype.beartype
def grid_sampler(
    g: jit_utils.GraphContext,
    input,
    grid,
    mode_enum,
    padding_mode_enum,
    align_corners,
):
    """Symbolic for aten::grid_sampler -> ONNX GridSample (new in opset 16)."""
    # The JIT passes integer enums; GridSample wants string attributes.
    # Invert torch.nn.functional's name -> enum tables to map them back.
    mode_s = {v: k for k, v in GRID_SAMPLE_INTERPOLATION_MODES.items()}[mode_enum]  # type: ignore[call-arg]
    padding_mode_s = {v: k for k, v in GRID_SAMPLE_PADDING_MODES.items()}[padding_mode_enum]  # type: ignore[call-arg]
    return g.op(
        "GridSample",
        input,
        grid,
        align_corners_i=int(align_corners),
        mode_s=mode_s,
        padding_mode_s=padding_mode_s,
    )
@_onnx_symbolic("aten::scatter_add")
@symbolic_helper.parse_args("v", "i", "v", "v")
@_beartype.beartype
def scatter_add(g: jit_utils.GraphContext, self, dim, index, src):
if symbolic_helper.is_caffe2_aten_fallback():
return g.at("scatter", self, dim, index, src, overload_name="src")
src_type = _type_utils.JitScalarType.from_value(
src, _type_utils.JitScalarType.UNDEFINED
)
src_sizes = symbolic_helper._get_tensor_sizes(src)
index_sizes = symbolic_helper._get_tensor_sizes(index)
if len(src_sizes) != len(index_sizes):
return symbolic_helper._unimplemented(
"scatter_add",
f"`index` ({index_sizes}) should have the same dimensionality as `src` ({src_sizes})",
)
# PyTorch only allows index shape <= src shape, so we can only consider
# taking index as subset size to src, like PyTorch does. When sizes for src
# and index are not matched or there are dynamic axes, we take index shape to
# slice src to accommodate.
if src_sizes != index_sizes or None in index_sizes:
adjusted_shape = g.op("Shape", index)
starts = g.op("Constant", value_t=torch.tensor([0] * len(index_sizes)))
src = g.op("Slice", src, starts, adjusted_shape)
src = symbolic_helper._maybe_get_scalar(src)
if symbolic_helper._is_value(src):
return g.op("ScatterElements", self, index, src, axis_i=dim, reduction_s="add")
else:
# Check if scalar "src" has same type as self (PyTorch allows different
# type for scalar src (but not when src is tensor)). If not, insert Cast node.
if _type_utils.JitScalarType.from_value(self) != src_type:
src = g.op(
"Cast",
src,
to_i=_type_utils.JitScalarType.from_value(self).onnx_type(),
)
return g.op(
"ScatterElements",
self,
index,
src,
axis_i=dim,
reduction_s="add",
)
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
754921f35d76333b549922c24ffecfa23b312d14 | c30bb146ea7eea462643776ee082e812ef737f1b | /docaligner/collect_devset.py | 6fdc64f525e5c419591aebcad25af63950a24af4 | [
"Apache-2.0"
] | permissive | christianbuck/CorpusMining | 918a21d8c708da5dbe389e1fce1df3b6b0f2ab83 | f9248c3528a415a1e5af2c5a54a60c16cd79ff1d | refs/heads/master | 2021-01-01T04:19:37.008401 | 2016-05-01T22:17:17 | 2016-05-01T22:18:05 | 56,846,782 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Sentences already emitted, per language side, so we can assert that every
# sentence in the dev set is globally unique.
# NOTE: this script uses the Python 2 print statement.
seen_e = set()
seen_f = set()
written = set()
# stdin: tab-separated lines "lang1<TAB>text1<TAB>lang2<TAB>text2".
for line in sys.stdin:
    line = line.split('\t')
    e, f = None, None
    if line[0] == 'en' and line[2] == 'fr':
        e, f = line[1], line[3]
    elif line[0] == 'fr' and line[2] == 'en':
        f, e = line[1], line[3]
    else:
        # Skip anything that is not an en/fr pair (in either order).
        continue
    if (f,e) in written:
        # Exact duplicate pair: drop silently.
        continue
    written.add((f,e))
    # Each sentence must not have appeared before on either side.
    assert e not in seen_e
    assert e not in seen_f
    seen_e.add(e)
    assert f not in seen_f
    assert f not in seen_e
    seen_f.add(f)
    # Output order: foreign<TAB>english.
    print "%s\t%s" % (f, e)
| [
"cbuck@lantis.de"
] | cbuck@lantis.de |
9064d19e4d304a39793423587aab4d40eb1e04b7 | dded9fb6567928952a283fc1c6db6a5a860bc1a6 | /tests/browser/after_hooks_tests.py | 16ded3721d2c8e8b688284b77ebfbd388120865c | [
"MIT"
] | permissive | watir/nerodia | 08b84aca4b72eae37e983006c15b824412335553 | 7e020f115b324ad62fe7800f3e1ec9cc8b25fcfe | refs/heads/master | 2023-04-15T20:02:34.833489 | 2023-04-06T23:46:14 | 2023-04-06T23:46:14 | 87,383,565 | 88 | 14 | MIT | 2023-04-06T23:42:29 | 2017-04-06T03:43:47 | Python | UTF-8 | Python | false | false | 8,145 | py | import pytest
from selenium.common.exceptions import UnexpectedAlertPresentException
from nerodia.exception import UnknownObjectException
@pytest.fixture
def cleanup_hooks(browser):
    # Teardown-only: after each test, switch back to the first window and
    # drop every registered after-hook so tests cannot leak into each other.
    yield
    browser.window(index=0).use()
    browser.after_hooks.after_hooks = []
@pytest.fixture
def clear_alert(browser):
    # Teardown-only: dismiss any alert a test left open so it cannot block
    # the next test.
    yield
    if browser.alert.exists:
        browser.alert.ok()
pytestmark = pytest.mark.usefixtures('cleanup_hooks')
class TestAfterHooksAdd(object):
    """Behaviour of browser.after_hooks.add."""

    def test_raises_correct_exception_when_not_given_any_arguments(self, browser):
        with pytest.raises(ValueError):
            browser.after_hooks.add()

    def test_runs_the_given_method_on_each_page_load(self, browser, page):
        # The hook records the page text; after goto() it must have seen the
        # newly loaded page's content.
        output = []

        def hook(b):
            output.extend([b.text])

        browser.after_hooks.add(method=hook)
        browser.goto(page.url('non_control_elements.html'))
        assert 'Dubito, ergo cogito, ergo sum' in ''.join(output)
class TestAfterHooksDelete(object):
    """Behaviour of browser.after_hooks.delete."""

    def test_removes_a_previously_added_after_hook(self, browser, page):
        output = []

        def hook(b):
            output.extend([b.text])

        # Hook fires on the first navigation...
        browser.after_hooks.add(method=hook)
        browser.goto(page.url('non_control_elements.html'))
        assert 'Dubito, ergo cogito, ergo sum' in ''.join(output)
        # ...and after deletion it must no longer fire.
        browser.after_hooks.delete(hook)
        browser.goto(page.url('definition_lists.html'))
        assert 'definition_lists' not in ''.join(output)
class TestAfterHooksRun(object):
    def test_runs_after_hooks_after_browser_goto(self, browser, page):
        # A hook registered via after_hooks.add must fire after goto() and
        # observe the newly loaded page's title.
        result = {}

        def hook(b):
            result['value'] = b.title == 'The font element'

        browser.after_hooks.add(method=hook)
        browser.goto(page.url('font.html'))
        assert result['value'] is True
@pytest.mark.page('font.html')
def test_runs_after_hooks_after_browser_refresh(self, browser):
result = {}
def hook(b):
result['value'] = b.title == 'The font element'
browser.after_hooks.add(method=hook)
browser.refresh()
assert result['value'] is True
@pytest.mark.page('non_control_elements.html')
def test_runs_after_hooks_after_element_click(self, browser):
result = {}
def hook(b):
b.wait_until(lambda br: br.title == 'Forms with input elements')
result['value'] = True
browser.after_hooks.add(method=hook)
browser.link(index=2).click()
assert result.get('value') is True
# TODO: xfail firefox
@pytest.mark.page('forms_with_input_elements.html')
def test_runs_after_hooks_after_element_submit(self, browser):
result = {}
def hook(b):
result['value'] = b.div(id='messages').text == 'submit'
browser.after_hooks.add(method=hook)
browser.form(id='new_user').submit()
assert result.get('value') is True
@pytest.mark.xfail_firefox(reason='https://github.com/mozilla/geckodriver/issues/661')
@pytest.mark.page('non_control_elements.html')
def test_runs_after_hooks_after_element_double_click(self, browser):
result = {}
def hook(b):
result['value'] = b.title == 'Non-control elements'
browser.after_hooks.add(method=hook)
browser.div(id='html_test').double_click()
assert result.get('value') is True
# TODO: xfail safari, firefox
@pytest.mark.page('right_click.html')
def test_runs_after_hooks_after_element_right_click(self, browser):
result = {}
def hook(b):
result['value'] = b.title == 'Right Click Test'
browser.after_hooks.add(method=hook)
browser.div(id='click').right_click()
assert result.get('value') is True
# TODO: xfail safari
@pytest.mark.page('iframes.html')
def test_runs_after_hooks_after_framed_driver_switch(self, browser):
result = {}
def hook(b):
result['value'] = b.title == 'Iframes'
browser.after_hooks.add(method=hook)
browser.iframe().element(css='#senderElement').exists
assert result.get('value') is True
# TODO: xfail safari
@pytest.mark.page('iframes.html')
def test_runs_after_hooks_after_browser_ensure_context(self, browser):
browser.iframe().element(css='#senderElement').locate()
result = {}
def hook(b):
result['value'] = b.title == 'Iframes'
browser.after_hooks.add(method=hook)
browser.locate()
assert result.get('value') is True
# TODO: xfail safari
@pytest.mark.page('alerts.html')
def test_runs_after_hooks_after_alert_ok(self, browser):
result = {}
def hook(b):
result['value'] = b.title == 'Alerts'
browser.after_hooks.add(method=hook)
with browser.after_hooks.without():
browser.button(id='alert').click()
browser.alert.ok()
assert result.get('value') is True
# TODO: xfail safari
@pytest.mark.page('alerts.html')
def test_runs_after_hooks_after_alert_close(self, browser):
result = {}
def hook(b):
result['value'] = b.title == 'Alerts'
browser.after_hooks.add(method=hook)
with browser.after_hooks.without():
browser.button(id='alert').click()
browser.alert.close()
assert result.get('value') is True
@pytest.mark.xfail_firefox(reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1223277',
raises=UnknownObjectException)
@pytest.mark.page('alerts.html')
@pytest.mark.quits_browser
@pytest.mark.usefixtures('quick_timeout')
def test_does_not_run_error_checks_with_alert_present(self, browser):
result = []
def hook(b):
result.append(b.title == 'Alerts')
browser.after_hooks.add(method=hook)
browser.button(id='alert').click()
assert not result
browser.alert.ok()
assert result
@pytest.mark.usefixtures('clear_alert')
def test_does_not_raise_error_when_running_error_checks_using_after_hooks_without_with_alert_present(self, browser, page):
def hook(b):
b.url
browser.after_hooks.add(method=hook)
browser.goto(page.url('alerts.html'))
with browser.after_hooks.without():
browser.button(id='alert').click()
@pytest.mark.xfail_firefox(reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1223277',
raises=UnexpectedAlertPresentException)
@pytest.mark.usefixtures('clear_alert')
def test_does_not_raise_error_if_no_error_checks_are_defined_with_alert_present(self, browser, page):
def hook(b):
b.url
browser.after_hooks.add(method=hook)
browser.goto(page.url('alerts.html'))
browser.after_hooks.delete(hook)
browser.button(id='alert').click()
@pytest.mark.xfail_firefox(reason='https://bugzilla.mozilla.org/show_bug.cgi?id=1223277',
raises=UnexpectedAlertPresentException)
@pytest.mark.usefixtures('clear_alert')
def test_does_not_raise_error_when_running_error_checks_on_closed_window(self, browser, page):
def hook(b):
b.url
browser.after_hooks.add(method=hook)
browser.goto(page.url('window_switching.html'))
browser.link(id='open').click()
window = browser.window(title='closeable window')
window.use()
browser.link(id='close').click()
class TestAfterHooksLength(object):
def test_provides_the_number_of_after_hooks(self, browser):
def hook():
return True
for _ in range(4):
browser.after_hooks.add(hook)
assert len(browser.after_hooks) == 4
class TestAfterHooksGetItem(object):
def test_returns_the_after_hook_at_the_provided_index(self, browser):
def hook1():
return True
def hook2():
return False
browser.after_hooks.add(hook1)
browser.after_hooks.add(hook2)
assert browser.after_hooks[1] == hook2
| [
"lucast1533@gmail.com"
] | lucast1533@gmail.com |
6dbb7130fb243a31b3df1a3302caa8ea76fc668f | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/api/alert_rule/delete_alert_rule_pb2.py | 5a83c31591a999f3815fab14c9af681c7edc0708 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 7,377 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: delete_alert_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='delete_alert_rule.proto',
package='alert_rule',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x17\x64\x65lete_alert_rule.proto\x12\nalert_rule\x1a\x1cgoogle/protobuf/struct.proto\"$\n\x16\x44\x65leteAlertRuleRequest\x12\n\n\x02id\x18\x01 \x01(\t\"[\n\x17\x44\x65leteAlertRuleResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x85\x01\n\x1e\x44\x65leteAlertRuleResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x31\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32#.alert_rule.DeleteAlertRuleResponseb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_DELETEALERTRULEREQUEST = _descriptor.Descriptor(
name='DeleteAlertRuleRequest',
full_name='alert_rule.DeleteAlertRuleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='alert_rule.DeleteAlertRuleRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=105,
)
_DELETEALERTRULERESPONSE = _descriptor.Descriptor(
name='DeleteAlertRuleResponse',
full_name='alert_rule.DeleteAlertRuleResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='alert_rule.DeleteAlertRuleResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='msg', full_name='alert_rule.DeleteAlertRuleResponse.msg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='alert_rule.DeleteAlertRuleResponse.data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=107,
serialized_end=198,
)
_DELETEALERTRULERESPONSEWRAPPER = _descriptor.Descriptor(
name='DeleteAlertRuleResponseWrapper',
full_name='alert_rule.DeleteAlertRuleResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='alert_rule.DeleteAlertRuleResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='alert_rule.DeleteAlertRuleResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='alert_rule.DeleteAlertRuleResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='alert_rule.DeleteAlertRuleResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=201,
serialized_end=334,
)
_DELETEALERTRULERESPONSE.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_DELETEALERTRULERESPONSEWRAPPER.fields_by_name['data'].message_type = _DELETEALERTRULERESPONSE
DESCRIPTOR.message_types_by_name['DeleteAlertRuleRequest'] = _DELETEALERTRULEREQUEST
DESCRIPTOR.message_types_by_name['DeleteAlertRuleResponse'] = _DELETEALERTRULERESPONSE
DESCRIPTOR.message_types_by_name['DeleteAlertRuleResponseWrapper'] = _DELETEALERTRULERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeleteAlertRuleRequest = _reflection.GeneratedProtocolMessageType('DeleteAlertRuleRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEALERTRULEREQUEST,
'__module__' : 'delete_alert_rule_pb2'
# @@protoc_insertion_point(class_scope:alert_rule.DeleteAlertRuleRequest)
})
_sym_db.RegisterMessage(DeleteAlertRuleRequest)
DeleteAlertRuleResponse = _reflection.GeneratedProtocolMessageType('DeleteAlertRuleResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEALERTRULERESPONSE,
'__module__' : 'delete_alert_rule_pb2'
# @@protoc_insertion_point(class_scope:alert_rule.DeleteAlertRuleResponse)
})
_sym_db.RegisterMessage(DeleteAlertRuleResponse)
DeleteAlertRuleResponseWrapper = _reflection.GeneratedProtocolMessageType('DeleteAlertRuleResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _DELETEALERTRULERESPONSEWRAPPER,
'__module__' : 'delete_alert_rule_pb2'
# @@protoc_insertion_point(class_scope:alert_rule.DeleteAlertRuleResponseWrapper)
})
_sym_db.RegisterMessage(DeleteAlertRuleResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
2c114217ef73ca6d31f1feed50418ed745207816 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/battle/shared/crosshair/settings.py | 657ae2ce4eace767c5376504f12ceb0827b4eee6 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,243 | py | # 2017.05.04 15:22:39 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/crosshair/settings.py
from AvatarInputHandler import aih_constants
CROSSHAIR_CONTAINER_SWF = 'crosshairPanelContainer.swf'
CROSSHAIR_ROOT_PATH = 'root.main'
CROSSHAIR_INIT_CALLBACK = 'registerCrosshairPanel'
CROSSHAIR_ITEM_PATH_FORMAT = '_level0.' + CROSSHAIR_ROOT_PATH + '.{}'
CROSSHAIR_RADIUS_MC_NAME = 'radiusMC'
SPG_GUN_MARKER_ELEMENTS_COUNT = aih_constants.SPG_GUN_MARKER_ELEMENTS_COUNT
SHOT_RESULT_TO_DEFAULT_COLOR = {aih_constants.SHOT_RESULT.UNDEFINED: 'normal',
aih_constants.SHOT_RESULT.NOT_PIERCED: 'red',
aih_constants.SHOT_RESULT.LITTLE_PIERCED: 'orange',
aih_constants.SHOT_RESULT.GREAT_PIERCED: 'green'}
SHOT_RESULT_TO_ALT_COLOR = {aih_constants.SHOT_RESULT.UNDEFINED: 'normal',
aih_constants.SHOT_RESULT.NOT_PIERCED: 'purple',
aih_constants.SHOT_RESULT.LITTLE_PIERCED: 'yellow',
aih_constants.SHOT_RESULT.GREAT_PIERCED: 'green'}
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\battle\shared\crosshair\settings.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:22:40 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
db378c92543395fef97a6e3c532b645c55dcff0f | 72b81c092351fa5740f6f156de23e5cc3e6c54e9 | /backEnd/academico/participante/urls.py | 3e749c311221399018f2f82f6dec35fdf301ceaa | [] | no_license | adamtuenti/repositorioDistribuidos | 0c1787726351e87c6272af721f9640f28834ef48 | e49332954437293f87cf62ad645f85208f9ed249 | refs/heads/main | 2023-02-10T03:25:28.748875 | 2021-01-14T04:25:29 | 2021-01-14T04:25:29 | 329,505,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | from django.urls import path, include
from django.shortcuts import *
from academico.participante.views import *
from django.views.generic import TemplateView
from .views import *
from academico.participante.Api import *
urlpatterns = [
path('asistencia/', asistencia, name="participante_asistencia"),
path('login_participante/',login_participante.as_view(),name='login_participante'),
path('existe_participante/',existe_participante.as_view(),name='existe_participante'),
path('notificaciones_participante/',notificaciones_participante.as_view(),name='notificaciones_participante'),
path('actualizar_notificacion/',actualizar_notificacion.as_view(),name='actualizar_notificacion'),
path('cursos_participante/',cursos_participante.as_view(),name='cursos_participante'),
path('detalles_curso/',detalles_curso.as_view(),name='detalles_curso'),
path('asistencia/by_evento_and_fecha', asistencia_by_evento_and_fecha, name="asistencia_by_evento_and_fecha"),
#---------Reporte----------
path('ParticipantesReprobados/', part_reprobados,name='Part_Reprobados'),
path('historico_participante/', historico_participante, name='historico_participante'),
#--------
path('contacto_participante',contacto_participante,name='contacto_participante'),
path('registro_asistencia_evento/',registro_asistencia_evento,name='registro_asistencia_evento'),
path('reporte_asistencia',reporte_asistencia,name='reporte_asistencia'),
path('perfil_participante',perfil_participante,name='perfil_participante'),
#-----
path('acta_nota_evento',acta_nota_evento,name='acta_nota_evento'),
path('cierre_eventos',cierre_eventos,name='cierre_eventos'),
path('registrar_notas_1raevaluacion',registrar_notas1,name='registrar_notas1'),
path('registrar_notas_mejoramiento',registrar_notas_mejoramiento,name='registrar_notas_mejoramiento'),
path('rectificar_notas',corregir_notas,name='corregir_notas'),
path('aprobar_notas',aprobar_notas,name='aprobar_notas'),
]
| [
"adanavarrete15@gmail.com"
] | adanavarrete15@gmail.com |
c1bceea26ecf4074e830dc137980c465f45a87b6 | 7bebd0ff76a23ee7f334eea4c84ba1759b922992 | /app/main/forms.py | cb753c2aa388ceb469ef1a0283a53ec437f6ba45 | [
"MIT"
] | permissive | MutuaFranklin/PitchCentre | a429ad9414e18b4c54b5ed7405d45586ff0cd391 | da3ccf0b1f5cd165d11f72386aec59a76e79bee2 | refs/heads/main | 2023-07-16T17:34:23.013904 | 2021-08-19T12:30:03 | 2021-08-19T12:30:03 | 395,551,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | from typing import Text
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField, SelectField,SubmitField
from wtforms.validators import Required
from wtforms.ext.sqlalchemy.fields import QuerySelectField
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
class PitchForm(FlaskForm):
title = StringField('Enter the title of your pitch',validators=[Required()])
pitch = TextAreaField('Enter your pitch',validators=[Required()])
category =SelectField("Pitch category",choices=[('Product Pitch','Product Pitch'),('Interview Pitch','Interview Pitch'), ('Technology Pitch','Technology Pitch'), ('Fashion Pitch','Fashion Pitch')],validators=[Required()])
submit = SubmitField('Post')
class CommentForm(FlaskForm):
comment = TextAreaField('Add a comment', validators=[Required()])
submit = SubmitField('Post')
| [
"franklin.mutua@student.moringaschool.com"
] | franklin.mutua@student.moringaschool.com |
0efac10fe189e8c081f6014cc173b2a7fa0b30ef | 875fd9c1dec693167919a8049c2a419528eb8913 | /downloadaudio/downloaders/google_tts.py | 31b5d23ed07ead67ab4c3a128df36f863c7d3568 | [] | no_license | ELLIOTTCABLE/anki-download-audio-forvo | c200d3c0ed2d9c193caf59046786389fe66958f0 | f44e287e718f375e38e05968d2e5587b9f002fcf | refs/heads/master | 2021-01-21T12:30:35.677391 | 2017-09-01T04:23:30 | 2017-09-01T04:23:30 | 102,073,183 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | # -*- mode: python; coding: utf-8 -*-
#
# Copyright © 2012–15 Roland Sieker <ospalh@gmail.com>
# Copyright © 2015 Paul Hartmann <phaaurlt@gmail.com>
# Inspiration and source of the URL: Tymon Warecki
#
# License: GNU AGPL, version 3 or later; http://www.gnu.org/copyleft/agpl.html
"""
Download pronunciations from GoogleTTS
"""
import urllib
from anki.template import furigana
from ..download_entry import Action, DownloadEntry
from .downloader import AudioDownloader
get_chinese = False
"""
Download for Chinese.
The Chinese support add-on downloads the pronunciation from GoogleTTS.
Using this for Chinese would lead to double downloads for most users,
so skip this by default.
"""
class GooglettsDownloader(AudioDownloader):
u"""Class to get pronunciations from Google’s TTS service."""
def __init__(self):
AudioDownloader.__init__(self)
self.icon_url = 'http://translate.google.com/'
self.url = 'http://translate.google.com/translate_tts?'
def download_files(self, field_data):
"""
Get text from GoogleTTS.
"""
self.downloads_list = []
if field_data.split:
return
if self.language.lower().startswith('zh'):
if not get_chinese:
return
word = furigana.kanji(field_data.word)
else:
word = field_data.word
self.maybe_get_icon()
if not field_data.word:
raise ValueError('Nothing to download')
word_path = self.get_tempfile_from_url(self.build_url(word))
entry = DownloadEntry(
field_data, word_path, dict(Source='GoogleTTS'), self.site_icon)
entry.action = Action.Delete
# Google is a robot voice. The pronunciations are usually
# bad. Default to not keeping them.
self.downloads_list.append(entry)
def build_url(self, source):
u"""Return a string that can be used as the url."""
qdict = dict(
tl=self.language, q=source.encode('utf-8'), ie='utf-8', client='t')
return self.url + urllib.urlencode(qdict)
| [
"ospalh@gmail.com"
] | ospalh@gmail.com |
85f9c234ed1e4cedb44d9c48426cdc833d8ade68 | fcc4df25f539e6057258706b10e1b602d2a3eaf7 | /pyannote/pipeline/__init__.py | b7e880526688d88047ee3c0eda32af18d20bb21e | [
"MIT"
] | permissive | PaulLerner/pyannote-pipeline | 40f08f5f34c1d9e3c8c906396df7322d3627f535 | b6ebc3fcef57c95ad539d79311c64a0b3cf86408 | refs/heads/master | 2020-09-21T18:37:28.447730 | 2019-06-26T20:55:18 | 2019-06-26T20:55:18 | 224,884,842 | 0 | 0 | NOASSERTION | 2019-11-29T16:05:57 | 2019-11-29T16:05:57 | null | UTF-8 | Python | false | false | 1,358 | py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .pipeline import Pipeline
from .optimizer import Optimizer
| [
"bredin@limsi.fr"
] | bredin@limsi.fr |
c46871fd6397e3e5b692e19fdb711773176df5ad | 910467bd40fbd6385d22165165b34fbe7940f0e2 | /polyaxon_cli/cli/config.py | b2d52ac4b650732437170f89daf362cc40a8e842 | [
"MIT"
] | permissive | VitaliKaiser/polyaxon-cli | 73bd343ab31c051be490867703566016c41a9fb8 | d70d2af46cc8dceb12b0945c563c625455e66cda | refs/heads/master | 2021-01-25T11:48:52.303584 | 2018-03-01T10:42:38 | 2018-03-01T10:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import click
from polyaxon_cli.managers.config import GlobalConfigManager
from polyaxon_cli.utils.formatting import dict_tabulate, Printer
def validate_options(ctx, param, value):
possible_values = ['verbose', 'host']
if value and value not in possible_values:
raise click.BadParameter(
"Value `{}` is not supported, must one of the value {}".format(value, possible_values))
return value
@click.group(invoke_without_command=True)
@click.option('--list', '-l', is_flag=True, help='List all global config values.')
def config(list):
"""Set and get the global configurations."""
if list:
config = GlobalConfigManager.get_config()
Printer.print_header('Current config:')
dict_tabulate(config.to_dict())
@config.command()
@click.argument('keys', type=str, nargs=-1)
def get(keys):
"""Get the global config values by keys.
Example:
\b
```bash
$ polyaxon config get host http_port
```
"""
config = GlobalConfigManager.get_config_or_default()
if len(keys) == 0:
return
print_values = {}
for key in keys:
if hasattr(config, key):
print_values[key] = getattr(config, key)
else:
click.echo('Key `{}` is not recognised.'.format(key))
dict_tabulate(print_values, )
@config.command()
@click.option('--verbose', type=bool, help='To set the verbosity of the client.')
@click.option('--host', type=str, help='To set the server endpoint.')
@click.option('--http_port', type=int, help='To set the http port.')
@click.option('--ws_port', type=int, help='To set the stream port.')
@click.option('--use_https', type=bool, help='To set the https.')
def set(verbose, host, http_port, ws_port, use_https):
"""Set the global config values.
Example:
\b
```bash
$ polyaxon config set --hots=localhost http_port=80
```
"""
config = GlobalConfigManager.get_config_or_default()
if verbose is not None:
config.verbose = verbose
if host is not None:
config.host = host
if http_port is not None:
config.http_port = http_port
if ws_port is not None:
config.ws_port = ws_port
if use_https is not None:
config.use_https = use_https
GlobalConfigManager.set_config(config)
Printer.print_success('Config was update.')
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
e73fb95f01cb39b90427e85df8567250a84ac39e | 90608029a5e8e0d5392f3373bb50d48771ba2398 | /products/migrations/0001_initial.py | dee6adcdbacf865d6ed1edb73ec0f4bb375167d3 | [] | no_license | wahid999/PyShop | 986e78fbc7c000c082fdf860a182e985cd41c45c | 1bb639125a0292010153f67912b9dc54f3318b5a | refs/heads/master | 2023-07-12T10:52:21.113216 | 2021-08-24T01:21:26 | 2021-08-24T01:21:26 | 399,295,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Generated by Django 3.1.3 on 2021-08-23 05:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('price', models.FloatField()),
('stock', models.IntegerField()),
('image_url', models.CharField(max_length=2083)),
],
),
]
| [
"wahidhussainturi@gmail.com"
] | wahidhussainturi@gmail.com |
220e633d0a3fd6cc2ab8040031f3ad949c5aeafd | ed823aaa73e9482576a7207c9c61953440540ee7 | /PycharmProjects/Selenium-python/multiplelist.py | bba790522c83ade5102176f599db41e84a46b213 | [] | no_license | KavithaBitra1980/pycharm-selenuim | 64b35ae4797e7ecb4644c06b0b12cdf629fcdf4d | 132b90d94461eccad30d7181651ba532674f3da9 | refs/heads/master | 2020-04-02T14:09:50.261066 | 2018-12-01T21:29:02 | 2018-12-01T21:29:02 | 154,513,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #demo for multiplelists using ZIP
l1 = [1,2,3]
l2 = [3,4,5,10,20,30]
l3 = [5,10,15,20,25]
for a,b in zip(l1,l2):
print('the multiplication of both matrixes is',2*(a*b))
for a,b,c in zip(l1,l2,l3):
print(a,b,c)
if a < b and a <c and b < c:
print(a ,'is the smallest')
print(c, 'is the largest')
print(b, 'is larger than ', a)
"""
RESULTS
the multiplication of both matrixes is 6
the multiplication of both matrixes is 16
the multiplication of both matrixes is 30
1 3 5
1 is the smallest
5 is the largest
3 is larger than 1
2 4 10
2 is the smallest
10 is the largest
4 is larger than 2
3 5 15
3 is the smallest
15 is the largest
5 is larger than 3
""" | [
"kavithabitra1980@gmail.com"
] | kavithabitra1980@gmail.com |
6b7f58b30f675887a80ab342faf04989a04ff4ef | a5d21c7b508d86229faef0b5781b91631def00c0 | /0x0B_redis_basic/web.py | 68db1bf67401a0728e47b3de243d6b07d979e22b | [] | no_license | SeifJelidi/holbertonschool-web_back_end | 0361d43c9540c0d0312790e81b46d08cff689025 | a83f490066193fe2b03f60ee3b18a968b46d8fc0 | refs/heads/master | 2023-08-13T15:19:43.460754 | 2021-10-18T08:56:24 | 2021-10-18T08:56:24 | 388,186,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #!/usr/bin/env python3
"""web module"""
from typing import Callable
import requests
import redis
from functools import wraps
redis_object = redis.Redis()
def count_req(method: Callable) -> Callable:
"""Count Request"""
@wraps(method)
def wrapper(link):
"""Wrapper method"""
redis_object.incr("count:{}".format(link))
c = redis_object.get("cached:{}".format(link))
if c:
return c.decode('utf-8')
r = method(link)
redis_object.setex("cached:{}".format(link), 10, r)
return r
return wrapper
@count_req
def get_page(url: str) -> str:
"""get_page"""
request = requests.get(url)
return request.text
| [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.