blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da919e0179455f6b53d11e7136b6f369f5bed978
|
ebe7c57183b0eeba9af1bdc72f0f81b9b8129ca9
|
/1. backtracking/047.py
|
d0866e064c3641441032f85b63db44d36015a6ae
|
[] |
no_license
|
proTao/leetcode
|
f2e46392b56b69606e1dd25cf5738cb0ad275645
|
97533d53c8892b6519e99f344489fa4fd4c9ab93
|
refs/heads/master
| 2021-11-24T10:23:56.927122
| 2021-11-18T04:28:05
| 2021-11-18T04:28:05
| 110,225,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
class Solution:
    def permuteUnique(self, nums):
        """Return all unique permutations of nums, which may contain duplicates.

        Backtracks over the *distinct* values with a remaining-count multiset,
        so duplicate permutations are never generated in the first place.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if not nums:
            return []
        res = []
        distinct_nums = set(nums)
        # Multiset: how many copies of each value are still available to place.
        remaining = {}
        for value in nums:
            remaining[value] = remaining.get(value, 0) + 1

        def deeper(path):
            if len(path) == len(nums):
                res.append(path)
                return  # permutation complete; no counts are left anyway
            for value in distinct_nums:
                if remaining[value] > 0:
                    remaining[value] -= 1
                    deeper(path + [value])
                    remaining[value] += 1  # restore count on backtrack

        deeper([])
        return res
# Ad-hoc smoke test: expect the three distinct permutations of [1, 1, 2].
s = Solution()
res = s.permuteUnique([1,1,2])
print(res)
|
[
"836807627@qq.com"
] |
836807627@qq.com
|
4dbf967617143afff7502d5004d540220460004e
|
fa58068fa4e0fd4c1c3713d27752c9c36efe8a44
|
/paket/main.py
|
c8843f50bd8aa984c6277fdbd565fab3476ac472
|
[] |
no_license
|
Zadrayca/Kourse
|
f611f4d3627286eeb5f1eedb7ce9bc5d06454cdc
|
e9a1bf6b658573eab556d9d1fa1297d452a6dfff
|
refs/heads/master
| 2020-06-14T09:41:41.631712
| 2017-03-09T19:00:41
| 2017-03-09T19:00:41
| 75,203,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Demo of the different import styles for the local `calk` package.
# NOTE(review): `pow` here is the calk submodule and shadows the builtin pow.
from calk import pow, multi, summa
from calk.pow import pow as r
y = multi.multi(3, 5)
x = summa.summa(5, 7)
z = r(3, 5)
print(x, y, z)
# Show the attributes exposed by the calk.pow submodule.
print(dir(pow))
|
[
"coxatiy@gmail.com"
] |
coxatiy@gmail.com
|
ea2f608d540ed8ad36f731c18dc431613e5ab8fb
|
2db5bf5832ddb99e93bb949ace1fad1fde847319
|
/beginLearn/googleclass/class3/test.py
|
7ffd1cdf702097daef8aa3a03597ae2ab5d4ac2c
|
[] |
no_license
|
RoderickAdriance/PythonDemo
|
2d92b9aa66fcd77b6f797e865df77fbc8c2bcd14
|
98b124fecd3a972d7bc46661c6a7de8787b8e761
|
refs/heads/master
| 2020-04-06T17:36:46.000133
| 2018-11-15T07:07:03
| 2018-11-15T07:07:03
| 157,666,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
from __future__ import print_function
import math
from sklearn import metrics
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.data import Dataset
# Quiet TF 1.x logging and keep pandas previews compact.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("data.csv")
# Shuffle the rows so any later sequential split is random.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
# Rescale the regression target into units of thousands.
california_housing_dataframe["median_house_value"] /= 1000.0
def my_input_fn(features, targets, batchsize=1, shuffle=True, num_epochs=None):
    """Build a TF 1.x input pipeline and return the next (features, label) pair.

    features/targets are pandas DataFrames; num_epochs=None repeats forever.
    """
    feature_arrays = {name: np.array(column)
                      for name, column in dict(features).items()}
    dataset = Dataset.from_tensor_slices((feature_arrays, targets))
    dataset = dataset.batch(batchsize).repeat(num_epochs)
    if shuffle:
        dataset = dataset.shuffle(10000)
    next_features, next_label = dataset.make_one_shot_iterator().get_next()
    return next_features, next_label
# One-feature pipeline: total_rooms -> median_house_value, one example per batch.
features,label = my_input_fn(california_housing_dataframe[["total_rooms"]], california_housing_dataframe[["median_house_value"]],
                             batchsize=1)
|
[
"1371553306@qq.com"
] |
1371553306@qq.com
|
a87804e82e918b9342cafbd8410b9e04151ab2e4
|
aee5f372ba1b5fbb1c8acf6080c4c86ae195c83f
|
/java-stubs/security/acl/__init__.pyi
|
e7af442f7ef702bb8b5218efb30dde7f6f0f22a0
|
[] |
no_license
|
rdemaria/pjlsa
|
25221ae4a4b6a4abed737a41a4cafe7376e8829f
|
e64589ab2203338db4253fbc05ff5131142dfd5f
|
refs/heads/master
| 2022-09-03T13:18:05.290012
| 2022-08-16T13:45:57
| 2022-08-16T13:45:57
| 51,926,309
| 1
| 5
| null | 2019-07-11T11:50:44
| 2016-02-17T13:56:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,887
|
pyi
|
import java.lang
import java.security
import java.util
import typing
class AclEntry(java.lang.Cloneable):
    """Type stub mirroring the java.security.acl.AclEntry interface."""
    def addPermission(self, permission: 'Permission') -> bool: ...
    def checkPermission(self, permission: 'Permission') -> bool: ...
    def clone(self) -> typing.Any: ...
    def getPrincipal(self) -> java.security.Principal: ...
    def isNegative(self) -> bool: ...
    def permissions(self) -> java.util.Enumeration['Permission']: ...
    def removePermission(self, permission: 'Permission') -> bool: ...
    def setNegativePermissions(self) -> None: ...
    def setPrincipal(self, principal: java.security.Principal) -> bool: ...
    def toString(self) -> str: ...
class AclNotFoundException(java.lang.Exception):
    """Type stub mirroring java.security.acl.AclNotFoundException."""
    def __init__(self): ...
class Group(java.security.Principal):
    """Type stub mirroring the java.security.acl.Group interface."""
    def addMember(self, principal: java.security.Principal) -> bool: ...
    def equals(self, object: typing.Any) -> bool: ...
    def hashCode(self) -> int: ...
    def isMember(self, principal: java.security.Principal) -> bool: ...
    def members(self) -> java.util.Enumeration[java.security.Principal]: ...
    def removeMember(self, principal: java.security.Principal) -> bool: ...
    def toString(self) -> str: ...
class LastOwnerException(java.lang.Exception):
    """Type stub mirroring java.security.acl.LastOwnerException."""
    def __init__(self): ...
class NotOwnerException(java.lang.Exception):
    """Type stub mirroring java.security.acl.NotOwnerException."""
    def __init__(self): ...
class Owner:
    """Type stub mirroring the java.security.acl.Owner interface."""
    def addOwner(self, principal: java.security.Principal, principal2: java.security.Principal) -> bool: ...
    def deleteOwner(self, principal: java.security.Principal, principal2: java.security.Principal) -> bool: ...
    def isOwner(self, principal: java.security.Principal) -> bool: ...
class Permission:
    """Type stub mirroring the java.security.acl.Permission interface."""
    def equals(self, object: typing.Any) -> bool: ...
    def toString(self) -> str: ...
class Acl(Owner):
    """Type stub mirroring the java.security.acl.Acl interface."""
    def addEntry(self, principal: java.security.Principal, aclEntry: AclEntry) -> bool: ...
    def checkPermission(self, principal: java.security.Principal, permission: Permission) -> bool: ...
    def entries(self) -> java.util.Enumeration[AclEntry]: ...
    def getName(self) -> str: ...
    def getPermissions(self, principal: java.security.Principal) -> java.util.Enumeration[Permission]: ...
    def removeEntry(self, principal: java.security.Principal, aclEntry: AclEntry) -> bool: ...
    def setName(self, principal: java.security.Principal, string: str) -> None: ...
    def toString(self) -> str: ...
class __module_protocol__(typing.Protocol):
    """Structural type of the ``jp.JPackage("java.security.acl")`` module object."""
    # A module protocol which reflects the result of ``jp.JPackage("java.security.acl")``.
    Acl: typing.Type[Acl]
    AclEntry: typing.Type[AclEntry]
    AclNotFoundException: typing.Type[AclNotFoundException]
    Group: typing.Type[Group]
    LastOwnerException: typing.Type[LastOwnerException]
    NotOwnerException: typing.Type[NotOwnerException]
    Owner: typing.Type[Owner]
    Permission: typing.Type[Permission]
|
[
"michi.hostettler@cern.ch"
] |
michi.hostettler@cern.ch
|
55f4632468c9a46223772b259e528fabae433016
|
ee6074aad7bef8b8279130fa561b6eb0a6e66b1e
|
/modules/sample.py
|
20032a1959f97a7f6c2401e185e308b900bece59
|
[
"MIT"
] |
permissive
|
antiquefu/pycameresp
|
23ca208c2a19d445d164fbdc25303d4be040f381
|
d86814625a7cd2f7e5fa01b8e1652efc811cef3a
|
refs/heads/main
| 2023-08-01T01:40:04.384937
| 2021-09-19T17:30:48
| 2021-09-19T17:30:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
# Distributed under MIT License
# Copyright (c) 2021 Remi BERTHOLET
from server.httpserver import HttpServer
from htmltemplate import Br,ButtonCmd,Option,SwitchCmd,Tag,SliderCmd,ComboCmd,Paragraph
from webpage.mainpage import *
from tools import useful
# Called when the button pressed
@HttpServer.addRoute(b'/sample/button')
async def buttonPressed(request, response, args):
	""" Log the sample-page button press and acknowledge the request. """
	print("Button clicked")
	await response.sendOk()
# Called when the slider state changed
@HttpServer.addRoute(b'/sample/slider')
async def sliderChanged(request, response, args):
	""" Log the slider's new integer value and acknowledge the request. """
	print("Slider change to %d"%int(request.params[b"value"]))
	await response.sendOk()
# Called when the combo state changed
@HttpServer.addRoute(b'/sample/combo')
async def comboChanged(request, response, args):
	""" Log the combo box selection and acknowledge the request. """
	print("Number %s selected"%useful.tostrings(request.params[b"value"]))
	await response.sendOk()
# Called when the switch state changed
@HttpServer.addRoute(b'/sample/switch')
async def switchChanged(request, response, args):
	""" Log the switch's new state and acknowledge the request. """
	print("Switch change to %s"%useful.tostrings(request.params[b"value"]))
	await response.sendOk()
# Test simple page with button
@HttpServer.addRoute(b'/sample', menu=b"Sample", item=b"Sample")
async def samplePage(request, response, args):
	""" Render the demo page showing each interactive widget (button, slider,
	combo, switch), each wired to one of the /sample/* callback routes above. """
	page = mainFrame(request, response, args, b"Sample",
		Tag(b'''
		<p>Example to interact with esp32 via an html page (see the content of file <b>sample.py</b>)</p>
		'''),
		ButtonCmd(text=b"Click on button", path=b"/sample/button"), Br(),Br(),
		SliderCmd(min=b"10", max=b"30", step=b"2", value=b"12", text=b"Move slider", path=b"/sample/slider"), Br(),
		ComboCmd(\
			[
				Option(value=b"One"   , text=b"One"),
				Option(value=b"Two"   , text=b"Two", selected=True),
				Option(value=b"Three" , text=b"Three"),
			], path=b"/sample/combo", text=b"Select number"), Br(),Br(),
		SwitchCmd(text=b"Change this switch", checked=True, path=b"/sample/switch"),
		Br(),
		Br(),
		Paragraph(b"To eliminate this page delete the <b>sample.py</b> file"))
	await response.sendPage(page)
|
[
"remi_bertholet@yahoo.fr"
] |
remi_bertholet@yahoo.fr
|
3f3cbc27b98cf2f962ea71902a23a95e4451122e
|
ee6acbd5fcd0fcd16230e96a4a539de41a02c97e
|
/operators/ibmcloud-iam-operator/python/pulumi_pulumi_kubernetes_crds_operators_ibmcloud_iam_operator/_tables.py
|
0e341dd0c30a19d942a6a75299bc8fdb6db597f2
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulumi-kubernetes-crds
|
777e78137aaf6525a44b61a02dccf91bf0d87a14
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
refs/heads/master
| 2023-03-15T04:29:16.039753
| 2020-12-30T19:35:54
| 2020-12-30T19:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Maps snake_case CRD field names to their camelCase wire names.
# NOTE(review): "group_id" maps to "GroupID" (leading capital) unlike every
# other entry; it is mirrored consistently below, so it presumably reflects
# the CRD itself -- confirm against the crd2pulumi generator before changing.
SNAKE_TO_CAMEL_CASE_TABLE = {
    "access_group_def": "accessGroupDef",
    "access_group_id": "accessGroupID",
    "access_group_name": "accessGroupName",
    "access_group_namespace": "accessGroupNamespace",
    "api_version": "apiVersion",
    "custom_role_name": "customRoleName",
    "custom_role_namespace": "customRoleNamespace",
    "custom_roles_d_name": "customRolesDName",
    "custom_roles_def": "customRolesDef",
    "defined_roles": "definedRoles",
    "display_name": "displayName",
    "group_id": "GroupID",
    "policy_id": "policyID",
    "resource_group": "resourceGroup",
    "resource_id": "resourceID",
    "resource_key": "resourceKey",
    "resource_name": "resourceName",
    "resource_value": "resourceValue",
    "role_crn": "roleCRN",
    "role_id": "roleID",
    "role_name": "roleName",
    "service_class": "serviceClass",
    "service_i_ds": "serviceIDs",
    "service_id": "serviceID",
    "user_email": "userEmail",
    "user_emails": "userEmails",
}

# Exact inverse of SNAKE_TO_CAMEL_CASE_TABLE (entry-for-entry).
CAMEL_TO_SNAKE_CASE_TABLE = {
    "accessGroupDef": "access_group_def",
    "accessGroupID": "access_group_id",
    "accessGroupName": "access_group_name",
    "accessGroupNamespace": "access_group_namespace",
    "apiVersion": "api_version",
    "customRoleName": "custom_role_name",
    "customRoleNamespace": "custom_role_namespace",
    "customRolesDName": "custom_roles_d_name",
    "customRolesDef": "custom_roles_def",
    "definedRoles": "defined_roles",
    "displayName": "display_name",
    "GroupID": "group_id",
    "policyID": "policy_id",
    "resourceGroup": "resource_group",
    "resourceID": "resource_id",
    "resourceKey": "resource_key",
    "resourceName": "resource_name",
    "resourceValue": "resource_value",
    "roleCRN": "role_crn",
    "roleID": "role_id",
    "roleName": "role_name",
    "serviceClass": "service_class",
    "serviceIDs": "service_i_ds",
    "serviceID": "service_id",
    "userEmail": "user_email",
    "userEmails": "user_emails",
}
|
[
"albertzhong0@gmail.com"
] |
albertzhong0@gmail.com
|
e5ee33c78f39ec07d8e46db54f732e895ef0f629
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/203.py
|
5e6817c4973f210cb1eb8eadcf2f4966ec3e3bb1
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
import sys

# Code Jam "Magic Trick": the volunteer's card is whatever appears in both
# chosen rows of the two 4x4 layouts.  Ported from Python 2 (print statement,
# xrange, bare map) so it runs on a modern interpreter; I/O is unchanged.
in_ = sys.stdin
T = int(in_.readline())
for t in range(T):
    r1 = int(in_.readline())
    # Four rows of four space-separated ints per layout.
    a1 = [list(map(int, in_.readline().split(' '))) for i in range(4)]
    r2 = int(in_.readline())
    a2 = [list(map(int, in_.readline().split(' '))) for i in range(4)]
    # Cards common to the row picked in each layout.
    res = set(a1[r1 - 1]).intersection(a2[r2 - 1])
    prefix = 'Case #%d:' % (t + 1)
    if len(res) == 1:
        print(prefix, list(res)[0])      # unique candidate: that's the card
    elif len(res) > 1:
        print(prefix, 'Bad magician!')   # ambiguous: trick cannot decide
    else:
        print(prefix, 'Volunteer cheated!')  # no overlap: inconsistent answers
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
90218a595c5ced9205138e11fb5e03416aa92719
|
fe75ab418adfd723f48b8eafc80515c9fd913395
|
/LeetCode/!0232. Implement Queue using Stacks.py
|
7230a81dac20ba8732640c59c0492c589d73a031
|
[] |
no_license
|
AshkenSC/Programming-Practice
|
d029e9d901f51ef750ed4089f10c1f16783d2695
|
98e20c63ce1590deda6761ff2f9c8c37f3fb3c4a
|
refs/heads/master
| 2021-07-20T06:41:12.673248
| 2021-06-25T15:44:06
| 2021-06-25T15:44:06
| 127,313,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
'''
0232. Implement Queue using Stacks
请你仅使用两个栈实现先入先出队列。队列应当支持一般队列的支持的所有操作(push、pop、peek、empty)。
'''
class MyQueue:
    """FIFO queue built from two LIFO stacks (LeetCode 0232).

    `stk` is the inbox stack that receives pushes; `temp` is the outbox stack
    that serves pops and peeks in FIFO order.  Elements migrate inbox -> outbox
    only when the outbox runs dry, so each element is moved at most once and
    every operation is amortized O(1) -- the original shuttled the whole queue
    back and forth on every pop/peek (O(n) each).
    """

    def __init__(self):
        """Initialize your data structure here."""
        self.stk = []   # inbox: newest element on top
        self.temp = []  # outbox: oldest element on top

    def push(self, x: int) -> None:
        """Push element x to the back of the queue."""
        self.stk.append(x)

    def _refill(self) -> None:
        # Drain the inbox into the outbox (reversing order), but only when the
        # outbox is empty -- interleaving would break FIFO order.
        if not self.temp:
            while self.stk:
                self.temp.append(self.stk.pop())

    def pop(self) -> int:
        """Remove and return the element at the front of the queue."""
        self._refill()
        return self.temp.pop()

    def peek(self) -> int:
        """Return the front element without removing it."""
        self._refill()
        return self.temp[-1]

    def empty(self) -> bool:
        """Return True when the queue holds no elements."""
        return not self.stk and not self.temp
|
[
"393940378@qq.com"
] |
393940378@qq.com
|
7476d47d2554a91d4a8cb9c7d91816e908594d7e
|
830a0667f2e70177e83ef394bce9972533ea449c
|
/arrayMaxConsecutiveSum.py
|
cd8969f3e5943780526236b2440a188963b4a77c
|
[] |
no_license
|
porosya80/codesignal
|
8659fba8cd9001efdca798590bacbfb4d41dc5b5
|
f26d5c739b093019a149047317cc32d9aa92541b
|
refs/heads/master
| 2020-03-22T21:54:23.342397
| 2018-12-05T05:05:51
| 2018-12-05T05:05:51
| 140,720,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
def arrayMaxConsecutiveSum(a, k):
    """Return the maximal sum over all windows of k consecutive elements of a.

    Sliding window: each step drops the element leaving the window and adds
    the one entering it, so the whole scan is O(len(a)).
    """
    window = sum(a[:k])
    best = window
    for start in range(1, len(a) - k + 1):
        window += a[start + k - 1] - a[start - 1]
        if window > best:
            best = window
    return best
# Example: the k = 3 consecutive-element sums of [1, 3, 2, 4]
inputArray = [1, 3, 2, 4]
k = 3
# are 1+3+2 = 6 and 3+2+4 = 9,
print(arrayMaxConsecutiveSum(inputArray, k))
# so this prints 9.
# (The comments previously here described a different example --
# [2, 3, 5, 1, 6] with k = 2, answer 8 -- left over from the problem statement.)
|
[
"porosya@gmail.com"
] |
porosya@gmail.com
|
2984b7bfb6e62a07535587dad44f16d99b08ffe2
|
9647524c0f4d93fb1c8a992c20fe9f9d2710cde3
|
/2-content/Python/pcc-master/chapter_12/ship.py
|
0df9da197640b0152c6bf885473c945151b024c2
|
[
"MIT"
] |
permissive
|
bgoonz/web-dev-notes-resource-site
|
16161aa68e8eecafeaba4dc7abeb957aaee864c5
|
e7dc9c30393597cb39830c49c3f51c1486b97584
|
refs/heads/master
| 2023-09-01T14:04:20.867818
| 2021-06-17T07:56:20
| 2021-06-17T07:56:20
| 329,194,347
| 7
| 5
|
MIT
| 2021-07-05T06:36:49
| 2021-01-13T04:34:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
import pygame
class Ship():
    """The player's ship: owns its image/rect and horizontal movement state."""

    def __init__(self, ai_settings, screen):
        """Load the ship image and park it at the bottom center of the screen."""
        self.screen = screen
        self.ai_settings = ai_settings

        # Image and bounding rectangles.
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()

        # Spawn position: bottom center.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom

        # Float copy of the horizontal center, so fractional speeds accumulate.
        self.center = float(self.rect.centerx)

        # Movement flags toggled by the input handlers.
        self.moving_right = False
        self.moving_left = False

    def update(self):
        """Advance the ship horizontally according to the movement flags."""
        speed = self.ai_settings.ship_speed_factor
        room_right = self.rect.right < self.screen_rect.right
        room_left = self.rect.left > 0
        # Adjust the float center first; both flags may apply in one frame.
        if self.moving_right and room_right:
            self.center += speed
        if self.moving_left and room_left:
            self.center -= speed
        # Propagate the float position back into the integer rect.
        self.rect.centerx = self.center

    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
8cb6535c3e1792c0646c81b62e0b3497165a47f3
|
bb976142e482afa6843271ed5c994734eca53e74
|
/FPV_ANN_pureRes_4D/utils/AdamW.py
|
314264d226666363e86465213d0d5b1a050a1c7e
|
[
"MIT"
] |
permissive
|
mhansinger/combustionML
|
552a7540864a2d6b173204cccfdc82ef8c8b2f8c
|
9e60324bbd026979d4241fbdd62faaff873ce2a9
|
refs/heads/master
| 2021-07-09T18:34:09.462100
| 2020-05-20T11:16:31
| 2020-05-20T11:16:31
| 102,774,653
| 0
| 2
|
MIT
| 2019-02-27T17:55:15
| 2017-09-07T18:57:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,130
|
py
|
"""From built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import copy
from six.moves import zip
from tensorflow.keras import backend as K
from tensorflow.keras.utils.generic_utils import serialize_keras_object
from tensorflow.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.keras.legacy import interfaces
from tensorflow.keras.optimizers import Optimizer
class AdamW(Optimizer):
    """AdamW optimizer.
    Default parameters follow those provided in the original paper.
    # Arguments
        lr: float >= 0. Learning rate.
        beta_1: float, 0 < beta < 1. Generally close to 1.
        beta_2: float, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
        decay: float >= 0. Learning rate decay over each update.
        weight_decay: float >= 0. Weight decay (L2 penalty) (default: 0.025).
        batch_size: integer >= 1. Batch size used during training.
        samples_per_epoch: integer >= 1. Number of samples (training points) per epoch.
        epochs: integer >= 1. Total number of epochs for training.
    # References
        - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
        - [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101)
    """
    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0., weight_decay=0.025,
                 batch_size=1, samples_per_epoch=1,
                 epochs=1, **kwargs):
        super(AdamW, self).__init__(**kwargs)
        # All hyperparameters are stored as backend variables so they can be
        # read/updated on the compute device and serialized by get_config().
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
            self.weight_decay = K.variable(weight_decay, name='weight_decay')
            self.batch_size = K.variable(batch_size, name='batch_size')
            self.samples_per_epoch = K.variable(samples_per_epoch, name='samples_per_epoch')
            self.epochs = K.variable(epochs, name='epochs')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        # Plain-float copy of the initial decay: used for the cheap Python-side
        # `> 0` check in get_updates without a backend read.
        self.initial_decay = decay
    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        """Build the symbolic update ops for one optimization step."""
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        lr = self.lr
        # Optional time-based learning-rate decay.
        if self.initial_decay > 0:
            lr = lr * (1. / (1. + self.decay * K.cast(self.iterations,
                                                      K.dtype(self.decay))))
        t = K.cast(self.iterations, K.floatx()) + 1
        '''Bias corrections according to the Adam paper
        '''
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))
        # First (m) and second (v) moment accumulators, one per parameter.
        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = [self.iterations] + ms + vs
        for p, g, m, v in zip(params, grads, ms, vs):
            # Exponential moving averages of the gradient and its square.
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            '''Schedule multiplier eta_t = 1 for simple AdamW
            According to the AdamW paper, eta_t can be fixed, decay, or
            also be used for warm restarts (AdamWR to come).
            '''
            eta_t = 1.
            # Standard Adam step ...
            p_t = p - eta_t*(lr_t * m_t / (K.sqrt(v_t) + self.epsilon))
            if self.weight_decay != 0:
                '''Normalized weight decay according to the AdamW paper
                '''
                # ... plus decoupled weight decay, scaled by
                # sqrt(b / (B * T)) as in the AdamW paper.
                w_d = self.weight_decay*K.sqrt(self.batch_size/(self.samples_per_epoch*self.epochs))
                p_t = p_t - eta_t*(w_d*p)
            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t
            # Apply constraints.
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
    def get_config(self):
        """Return the hyperparameters (as plain Python values) for serialization."""
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'weight_decay': float(K.get_value(self.weight_decay)),
                  'batch_size': int(K.get_value(self.batch_size)),
                  'samples_per_epoch': int(K.get_value(self.samples_per_epoch)),
                  'epochs': int(K.get_value(self.epochs)),
                  'epsilon': self.epsilon}
        base_config = super(AdamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
|
[
"maximilian.hansinger@unibw.de"
] |
maximilian.hansinger@unibw.de
|
e664fe95ab614d5939008f413cf277a1b48a0f36
|
3017b7399869057a8be7fb11ee9341b9c8f97ba4
|
/qa/elgs/pix_area.py
|
87c35a1217fde6a4aa8aaa86526db6b23f0097de
|
[] |
no_license
|
michaelJwilson/SV-QA
|
8f486422eb71b3fbd0d395904fd654ba432bd777
|
dd6095d570442852bb28ac9da0f18be7b83cddce
|
refs/heads/master
| 2020-07-29T16:04:55.759155
| 2019-12-20T14:37:23
| 2019-12-20T14:37:23
| 209,872,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,625
|
py
|
import os
import sys
import glob
import fitsio
import matplotlib
import pylab as pl
import pandas as pd
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
import numpy.lib.recfunctions as rfn
import healpy as hp
from mpl_toolkits.axes_grid1 import make_axes_locatable
from fast_scatter import fast_scatter
from matplotlib import rc
from astropy.table import Table, vstack
from desitarget.targets import encode_targetid
from desitarget.geomask import is_in_box
from desitarget.targetmask import desi_mask
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy.stats import pearsonr
rc('font', **{'family':'serif', 'serif':['Times']})
rc('text', usetex=True)
##
# Healpix resolution from the command line.  np.int was a deprecated alias of
# the builtin int and was removed in NumPy 1.24; use int directly.
nside = int(sys.argv[1])
parea = hp.nside2pixarea(nside, degrees = True)
def read_elgs(nrandom=20000, _all=False):
    """Load rows from the DR8 random catalogue.

    Returns (rows, quality_cols): the first `nrandom` rows (all rows when
    `_all` is True, which switches to the full catalogue file) with RA/DEC
    plus the imaging-quality columns listed in `quality_cols`.
    """
    quality_cols = ['NOBS_G', 'NOBS_R', 'NOBS_Z', 'PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R', 'GALDEPTH_Z', 'PSFSIZE_G', 'PSFSIZE_R', 'PSFSIZE_Z', 'PSFDEPTH_W1', 'PSFDEPTH_W2', 'MASKBITS']

    if _all:
        nrandom = -1
        catalogue = fitsio.FITS('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randomsall/randoms-inside-dr8-0.31.0-all.fits')
    else:
        catalogue = fitsio.FITS('/project/projectdirs/desi/target/catalogs/dr8/0.31.0/randoms/randoms-inside-dr8-0.31.0-2.fits')

    rows = catalogue[1][quality_cols + ['RA', 'DEC']][:nrandom]
    return rows, quality_cols
if __name__ == '__main__':
    # NOTE(review): these override the command-line nside parsed above.
    nside = 512
    _all = True
    save = False

    ##
    randoms, cols = read_elgs(nrandom=5000, _all=_all)

    # Per-random keep weight; starts at 1 and is zeroed when any veto bit hits.
    # np.float/np.bool/np.int were deprecated aliases of the builtins and were
    # removed in NumPy 1.24; the builtins are used throughout below.
    isin = np.ones(len(randoms), dtype=float)

    for bit in [1, 5, 6, 7, 11, 12, 13]:
        # Zero the weight of any random whose MASKBITS has this veto bit set.
        isin = isin * (1.0 - np.clip(np.bitwise_and(randoms['MASKBITS'], np.ones_like(randoms['MASKBITS']) * 2 ** bit), a_min=0.0, a_max=1.0))
        ##  isin = isin * np.array([(x & 2 ** bit) == 0 for x in randoms['MASKBITS']]).astype(np.int)

    isin = isin.astype(bool)
    remaining = randoms[isin]

    print('Input: {}; Remaining: {} ({})'.format(len(randoms), len(remaining), np.count_nonzero(isin)))

    ##  randoms = Table(data=randoms, names=['RA', 'DEC'] + cols)
    ##  randoms.pprint()
    '''
    binary = np.load('/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/healmaps/elg_tdensity_{}.npy'.format(nside))
    rhpind = binary[:,0]
    rhpra = binary[:,1]
    rhpdec = binary[:,2]
    rtdensity = binary[:,3]
    '''
    ##
    npix = hp.pixelfunc.nside2npix(nside)
    indices = np.arange(npix)
    result = np.zeros_like(indices)
    denom = np.zeros_like(indices)

    ## Count unmasked randoms per healpix pixel (ring ordering).
    hppix = hp.ang2pix(nside, (90. - remaining['DEC']) * np.pi / 180., remaining['RA'] * np.pi / 180., nest=False)
    hpind, cnts = np.unique(hppix, return_counts=True)
    for i, ind in enumerate(hpind):
        result[ind] += cnts[i]

    # Count all randoms per pixel for the denominator.
    hppix = hp.ang2pix(nside, (90. - randoms['DEC']) * np.pi / 180., randoms['RA'] * np.pi / 180., nest=False)
    hpind, cnts = np.unique(hppix, return_counts=True)
    for i, ind in enumerate(hpind):
        denom[ind] += cnts[i]

    # Unmasked fraction per occupied pixel (pixels with no randoms stay 0).
    mask = np.array(denom > 0.0).astype(int)
    result = mask * result / denom
    occupied = result[result > 0.0]

    print('\n\nDeviation level: {:.3} per cent.'.format(100. * np.std(occupied)))

    ##  Pixel centers in (RA, DEC).
    theta, phi = hp.pix2ang(nside, range(npix), nest=False)
    hpra, hpdec = 180. / np.pi * phi, 90. -180. / np.pi * theta

    if save:
        np.save(os.environ['CSCRATCH'] + '/BGS/SV-ASSIGN/elgs/pix_area.npy', np.c_[np.arange(npix), hpra, hpdec, result])

    print('\n\nDone.\n\n')

    if not _all:
        fig, axarr = plt.subplots(nrows=1, ncols=1, figsize=(10, 10))
        plt.subplots_adjust(left = 0.05, right = 0.95, hspace=0.4, wspace=0.2, top = 0.955, bottom = 0.025)
        # Rotate RA so the plot is centered away from the wrap point.
        hpra[hpra > 300.] -= 360.
        hpra += 60.
        fast_scatter(axarr, hpra, hpdec, mask, -0.1, 1.1, 50, cmap='BuPu', printit=False)
        axarr.set_xlim(365., -5.)
        pl.savefig('pix_area_test.png')
|
[
"mjwilson@lbl.gov"
] |
mjwilson@lbl.gov
|
2f566142ff25a00fdbacb0a7a723b1cd6b2bad90
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/90/usersdata/197/60329/submittedfiles/matriz2.py
|
85af8758f0a532d442df4077a2a04b076b7f95fe
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# -*- coding: utf-8 -*-
# Minimal syntactic repair of a truncated student submission.  Original
# defects: `import numpy AS np` (keywords are case-sensitive), an unclosed
# `for` statement, and `def somas iguais(a):` (identifiers cannot contain
# spaces).
import numpy as np

n=int(input('Digite o numero de linhas e colunas:'))
a=np.zeros((n,n))
# Read the n*n matrix entries row by row (the original loop was left open).
for i in range (0, a.shape[0]):
    for j in range(0, a.shape[1]):
        a[i, j] = float(input())

def somas_iguais(a):
    # TODO(review): the submission ends here.  Presumably this should decide
    # whether the row/column sums of `a` are all equal -- confirm against the
    # assignment statement before implementing.
    raise NotImplementedError
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5dc345bc932009a8b62c1f5f0ee1a5f1c621fe79
|
faf4c1055f50ca4dd81d0fc2f16f5e95905e3827
|
/protocol/split_mutations.py
|
ec227caabe40b8148918a621e55bf3e32706bad5
|
[] |
no_license
|
Ningshiqi/protocol
|
07e0e95c57d74e0ea2bbbfd7d49f9dc42c5dc37e
|
8a906a79a2835a61d252cafc2452b407230c4409
|
refs/heads/master
| 2021-01-20T07:27:38.880511
| 2016-06-22T18:39:15
| 2016-06-22T18:39:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
from random_split import RandomSplit
import pandas as pd
import sys
import argparse
import os
def mymkdir(dir_path):
    """Best-effort create dir_path, including any missing parent directories.

    Errors (most commonly "already exists") are swallowed, matching the
    original behavior; makedirs generalizes the original os.mkdir so nested
    iteration directories can be created in one call.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        pass
def parse_arguments():
    """Parse the command-line options and return them as a plain dict."""
    description = ('Generate splits of the data while respecting the proportion '
                   'of each tumor type in the split.')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-i', '--input',
                        type=str, required=True,
                        help='Mutations in correct format')
    parser.add_argument('-n', '--number',
                        type=int, default=1,
                        help='Number of times to perform random split of data')
    parser.add_argument('-s', '--sample-col',
                        type=str, default=None,
                        help=('Column name containing the sample ID. Default: automatically'
                              ' checks whether "Tumor_Sample_Barcode" or "Tumor_Sample" is a column.'))
    parser.add_argument('-o', '--output',
                        type=str, required=True,
                        help='Output directory for mutations split into two files')
    return vars(parser.parse_args())
def main(opts):
    """Split the input mutation file into paired half-splits, one directory
    per iteration, writing first.txt/second.txt for each."""
    # try to make directory
    mymkdir(opts['output'])

    # read in data.  argparse stores -i/--input under the 'input' key; the
    # original read opts['mutations'], which always raised KeyError.
    df = pd.read_csv(opts['input'], sep='\t')

    # figure out the sample column; an explicit --sample-col takes precedence.
    # (The original unconditionally overwrote the user's choice with the
    # auto-detected column.)
    if opts['sample_col'] is not None:
        samp_col = opts['sample_col']
    elif 'Tumor_Sample_Barcode' in df.columns:
        samp_col = 'Tumor_Sample_Barcode'
    elif 'Tumor_Sample' in df.columns:
        samp_col = 'Tumor_Sample'
    else:
        # The original called an undefined `logger`; report on stderr instead.
        sys.stderr.write('Please specify the column name for the sample ID (--sample-col)\n')
        sys.exit(1)

    # setup random splitting object
    SAMPLE_RATE = .5  # half splits
    dfg = RandomSplit(df.copy(),
                      col_name=samp_col,
                      sub_sample=SAMPLE_RATE,
                      num_iter=opts['number'])

    # make random splits
    for i, (left_df, right_df) in enumerate(dfg.dataframe_generator()):
        output_dir = os.path.join(opts['output'], 'Iteration_{0}'.format(i))
        mymkdir(output_dir)

        # make sure data is sorted by genes, so no problem with entropy script
        # assuming sorted order.  DataFrame.sort(columns=...) was removed from
        # pandas; sort_values is the replacement.
        left_df = left_df.sort_values(by=['Gene'])
        right_df = right_df.sort_values(by=['Gene'])

        lout_path = os.path.join(output_dir, 'first.txt')
        left_df.to_csv(lout_path,
                       sep='\t', index=False)
        rout_path = os.path.join(output_dir, 'second.txt')
        right_df.to_csv(rout_path,
                        sep='\t', index=False)
if __name__ == '__main__':
    # Parse CLI options and run the splitter only when executed as a script.
    opts = parse_arguments()
    main(opts)
|
[
"collintokheim@gmail.com"
] |
collintokheim@gmail.com
|
2421c173dbdd049106e02be81e7dfa5430fd987e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02629/s238693041.py
|
ab02df2f11872200d5371b13356ba05aa505c465
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import sys , math
from bisect import bisect

# Convert the 1-indexed integer N into a lowercase letter string, counting
# through all 1-letter strings, then all 2-letter strings, and so on
# (effectively spreadsheet-column-style bijective base 26 with a..z).
N=int(input())
alp="abcdefghijklmnopqrstuvwxyz"
# R accumulates thresholds: after the loop, R[j] marks where the block of
# (j+2)-letter strings begins, built up to just past 10**15.
R=[]
i=1
tot = 0
while tot < 1000000000000001:
    tar = 26**i
    tot+=tar
    R.append(tot+1)
    i+=1
# keta = number of letters ("digits") in the answer.
keta = bisect(R , N)+1
if keta == 1:
    # Single-letter answers map straight onto the alphabet.
    print(alp[N-1])
    sys.exit()
ans = ""
# Offset of N relative to the threshold for keta-letter strings.
# NOTE(review): M can be negative here; the digit loop below relies on
# Python's floor division and negative indexing into `alp` -- verify
# carefully before restructuring.
M = N - R[keta - 1]
for i in range(keta):
    j = keta - i - 1
    ind = M // (26**j)
    M -= ind * (26**j)
    ans+=alp[ind]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
da07d731ba5449ac6f1e1a753266a1d267e42d87
|
99af867b8112a2a7ca55b204d795141ccf6a3906
|
/ironbox/evaluation_models/classify_fcnet.py
|
7a5e081fc1718e43c924985a3198e2a21d39fccc
|
[] |
no_license
|
evanthebouncy/class_and_style
|
5f165f75e3850919e90a05a72533209429efed04
|
d434e2414526bece7d6757f4c9c1ccb94263769f
|
refs/heads/master
| 2020-03-29T18:09:36.417411
| 2019-02-04T22:10:33
| 2019-02-04T22:10:33
| 150,197,318
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# ===================== FC NN CLASSIFIER =====================
def to_torch(x, dtype, req = False):
    """Convert a numpy array to a CUDA tensor wrapped in a Variable.

    :param x: numpy array to convert.
    :param dtype: "int" for LongTensor, anything else for FloatTensor.
    :param req: whether the resulting Variable requires gradients.
    :return: a torch.autograd.Variable on the GPU.

    NOTE(review): this unconditionally uses torch.cuda tensors, so the
    whole module requires a CUDA-capable device.
    """
    tor_type = torch.cuda.LongTensor if dtype == "int" else torch.cuda.FloatTensor
    x = Variable(torch.from_numpy(x).type(tor_type), requires_grad = req)
    return x
# A simple fully connected neural nework representing a function that maps
# A input_dim input to k classes
class FCNet(nn.Module):
    """Fully connected classifier: input_dim -> input_dim // 2 -> k_classes."""

    def __init__(self, input_dim, k_classes):
        super(FCNet, self).__init__()
        self.name = "FCNet"
        self.fc = nn.Linear(input_dim, input_dim // 2)
        self.pred = nn.Linear(input_dim // 2, k_classes)
        self.opt = torch.optim.Adam(self.parameters(), lr=0.001)

    def predict(self, x):
        """Return per-class log-probabilities for the batch x."""
        hidden = F.relu(self.fc(x))
        return F.log_softmax(self.pred(hidden), dim=1)

    def learn(self, train_corpus):
        """Train until the loss saturates.

        train_corpus must expose get_sample(n_batch) returning a batch of
        (X, Y) samples.  Training stops once the most recent loss drops
        below 1e-3, or when the mean loss over the last 1000 iterations
        is not better than 99% of the mean over the previous 1000.
        """
        loss_history = []
        while True:
            # Check the stopping criteria once enough history accumulated.
            if len(loss_history) > 2100:
                if loss_history[-1] < 1e-3:
                    break
                recent_loss = np.mean(loss_history[-1000:])
                older_loss = np.mean(loss_history[-2000:-1000])
                # Stop when the recent window shows <1% improvement.
                if recent_loss > 0.99 * older_loss:
                    break
            # Draw a random batch and move it to the GPU.
            batch_x, batch_y = train_corpus.get_sample(40)
            batch_x = to_torch(batch_x, "float")
            batch_y = to_torch(batch_y, "int")
            # One optimization step on the negative log-likelihood.
            self.opt.zero_grad()
            loss = F.nll_loss(self.predict(batch_x), batch_y)
            loss_history.append(loss.data.cpu().numpy())
            loss.backward()
            self.opt.step()

    def evaluate(self, test_corpus):
        """Return the error rate on test_corpus, a pair (X, Y) of arrays."""
        X, Y = test_corpus
        scores = self.predict(to_torch(X, "float")).data.cpu().numpy()
        label_pred = np.argmax(scores, axis=1)
        return np.sum(label_pred != Y) / len(Y)
# Smoke test: construct a 100-dimensional-input, 4-class network.
if __name__ == '__main__':
    fcnet = FCNet(100, 4)
    print ("hi")
|
[
"evanthebouncy@gmail.com"
] |
evanthebouncy@gmail.com
|
b761885bc3e88a2116f44fd2680442026270b9cd
|
30b4d3122db7146d07a6eb431f6c1030f716aaa8
|
/memegen/memegen/routes/examples.py
|
a25caff591e76aece6f65e128a388b085439301a
|
[
"MIT"
] |
permissive
|
flavienbwk/memegen
|
b00df3d2a2fb68f7b2de88e0ed158280f99f7fa7
|
dcb5635ad556d1c855fc3851609b32b1be133441
|
refs/heads/master
| 2020-11-25T03:38:56.626649
| 2019-12-16T23:59:24
| 2019-12-16T23:59:24
| 228,484,764
| 0
| 0
|
NOASSERTION
| 2019-12-16T22:09:08
| 2019-12-16T22:09:07
| null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
from flask import Blueprint, render_template, current_app, make_response
from ._utils import samples
blueprint = Blueprint('examples-page', __name__)


@blueprint.route("/examples")
def get():
    """Render the examples page, cacheable client-side for one week."""
    rendered = render_template(
        "examples.html",
        sample_images=list(samples()),
        config=current_app.config,
    )
    response = make_response(rendered)
    # One week, expressed in seconds.
    response.headers['Cache-Control'] = f'max-age={60*60*24*7}'
    return response
|
[
"jacebrowning@gmail.com"
] |
jacebrowning@gmail.com
|
d003a7c6ff75f74dbe0eb5bd94675975cd7a687c
|
b7a31624f827b16f2b6e1cc6b61bf8529191ff0f
|
/matplotlib/多序列堆积条状图.py
|
49140fe1a9bbbb3e291ee0348f5d86c95563913a
|
[] |
no_license
|
thj120000/python
|
eb7daddefef6b9a1039e8ef58c3af88e05ffb794
|
a52aad1e57b100064db3e1e3a2af3c02eded4bf7
|
refs/heads/master
| 2020-04-24T19:29:48.741308
| 2019-03-03T11:11:12
| 2019-03-03T11:11:12
| 172,213,828
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
#!@Author : Sanwat
#!@File : .py
# Demo script: multi-series stacked bar charts with matplotlib.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
'''
bar()类型,可以将pandas 的dataframe he series用图表表示出来
'''
# Figure 1: vertical stacked bars -- each series is drawn on top of the
# running sum of the series below it via the `bottom` keyword.
plt.figure(1)
series1= np.array([3,4,5,3])
series2= np.array([1,2,2,5])
series3= np.array([2,3,3,4])
index= np.arange(4)
plt.axis([0,4,0,15])
plt.bar(index,series1,color='r')
plt.bar(index,series2,color='g',bottom=series1)
plt.bar(index,series3,color='b',bottom=series1+series2)
plt.xticks(index-0.4,['Jan15','Feb15','Mar15','Apr15'])
plt.show()
# Figure 2: the same data as horizontal stacked bars (`left` plays the
# role of `bottom`).
plt.figure(2)
plt.axis([0,15,0,4])
plt.barh(index,series1,color='g')
plt.barh(index,series2,color='r',left=series1)
plt.barh(index,series3,color='b',left=series1+series2)
plt.yticks(index-0.4,['Jan','Feb','Mar','Apr'])
plt.show()
'''
下面我们用不同的颜色来区分多个序列。
'''
# Figure 3: distinguish the series by hatch pattern on white bars
# instead of by color.
plt.figure(3)
plt.axis([0,15,0,4])
plt.title('A Multiseries Horizontal Bar Chart',fontsize=20,color='r')
plt.barh(index,series1,color='w',hatch='xx')# the hatch keyword selects the fill-line pattern
plt.barh(index,series2,color='w',hatch= '///',left=series1)
plt.barh(index,series3,color='w',hatch='\\\\\\',left=series1+series2)
plt.yticks(index-0.4,['Jan','Feb','Mar','Apr'])
plt.show()
|
[
"2464237217@qq.com"
] |
2464237217@qq.com
|
c2a45a4d33438fa9313023600490c3ba37f977e5
|
751b094918ae9200afe7824d58804549082caa95
|
/src/python/WMComponent/DBS3Buffer/Oracle/DBSBufferFiles/SetLocationByLFN.py
|
b5ca849beb2bedc814bf1f1956da2893d1dd3764
|
[] |
no_license
|
cinquo/WMCore
|
7ebd13269f42eb97f416f8f2bdaca05fa93c6afc
|
122f9332f2e944154dd0df68b6b3f2875427b032
|
refs/heads/master
| 2021-01-09T06:28:58.947626
| 2013-06-05T08:31:53
| 2013-06-05T08:31:53
| 2,965,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
#!/usr/bin/env python
"""
_SetLocationByLFN_
Oracle implementation of DBSBuffer.SetLocationByLFN
"""
from WMComponent.DBS3Buffer.MySQL.DBSBufferFiles.SetLocationByLFN import SetLocationByLFN as MySQLSetLocationByLFN
class SetLocationByLFN(MySQLSetLocationByLFN):
    """
    Set the location of files using lfn as the key
    """
    # Oracle flavor only overrides the SQL text; execution logic is
    # inherited from the MySQL implementation.  The statement inserts a
    # (file, location) pair by resolving the file id from the :lfn bind
    # variable and the location id from :sename.
    sql = """INSERT INTO dbsbuffer_file_location (filename, location)
               SELECT df.id, dl.id
                 FROM dbsbuffer_file df, dbsbuffer_location dl
                 WHERE df.lfn = :lfn
                 AND dl.se_name = :sename
                 """
|
[
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] |
sfoulkes@4525493e-7705-40b1-a816-d608a930855b
|
15c3b44a518019dd9db25700f49622a3c9501ea3
|
488ae9723f148082e949448eed2fdcb4c111e784
|
/bill-calculator.py
|
61c9064f33cd5cda10414550cac73be05a057b70
|
[] |
no_license
|
priyanka-advani/hba_functions_exercise
|
91b55ab221557d6358a6364bc8d26bb9644b12fc
|
d4a1d6cbfc90cc3fbee8157ba2e8707d36ef8484
|
refs/heads/master
| 2021-01-20T02:39:11.656709
| 2017-04-26T04:20:16
| 2017-04-26T04:20:16
| 89,437,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
"""
This is Part 4 of the Hackbright Prep functions exercise
"""
def calculate_tip(bill_amt, tip_percentage):
    """Given the bill amount and tip percentage, calculates the tip."""
    return bill_amt * tip_percentage
def calculate_total(bill_amt, tip_amt):
    """Given the tip amount and the bill amount, calculates the total bill."""
    return bill_amt + tip_amt
def split_bill(total, number_of_people):
    """Given the bill total and the number of people, calculates the total per person."""
    return total / number_of_people
def total_per_person():
    """Gets user input for bill amount, tip %, and # of people. Returns total per person.

        This function should:

        1. Get user input for the bill amount, tip percentage, and # of people
        2. Calculate the tip amount and save it to a variable.
        3. Using the tip amount calculated above, find the total bill amount.
        4. Using the total found above, calculate the total per person.
    """
    # NOTE(review): raw_input is Python 2 only; this file would need
    # input() under Python 3.
    # ENTER YOUR CODE HERE
    bill_amt = int(raw_input("Enter bill amount: "))
    tip_percentage = float(raw_input("Enter tip percentage: "))
    number_of_people = int(raw_input("Enter number of people: "))
    tip_amt = calculate_tip(bill_amt, tip_percentage)
    total = calculate_total(bill_amt, tip_amt)
    per_person = split_bill(total, number_of_people)
    return per_person
##############################################################################
# Don't touch the code below, this will allow us to run the total_per_person function when we
# run our python file in the terminal using `python bill-calculator.py`
# NOTE(review): the `print x` statement syntax below is Python 2 only.
if __name__ == "__main__":
    print total_per_person()
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
493c4d78f5463a27107352a23c732fa1f16841e4
|
def4838e05acb0932f3b51ce689b5b264bf4ebfd
|
/apps/users/forms.py
|
bfaf6e3a5f29e314158102103fa93e41451581f5
|
[] |
no_license
|
shishengjia/Django_By_Example_Bookmarks
|
6ce125b2f15348b6b2fec26c4ff8694f000194e6
|
377d7ce31ea66e5e55032fcf47d84a1afd9acdbe
|
refs/heads/master
| 2021-01-19T14:16:52.566325
| 2017-04-27T12:23:41
| 2017-04-27T12:23:41
| 88,141,126
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
from django import forms
from .models import UserProfile
class LoginForm(forms.Form):
username = forms.CharField(error_messages={'required': '请填写您的姓名'})
password = forms.CharField(min_length=6, error_messages={'required': '请填写您的密码',
'min_length': '密码不能少于6位'})
class RegisterForm(forms.ModelForm):
username = forms.CharField()
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class Meta:
model = UserProfile
fields = ('username', )
|
[
"shishengjia1@live.com"
] |
shishengjia1@live.com
|
d50e226c0416782f1bbf88fe9f0570b0fca3618a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/recognize_thailand_idcard_request.py
|
b0ea673ab51acb3c850cea2011a6aae59dd80fec
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,613
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecognizeThailandIdcardRequest:

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'enterprise_project_id': 'str',
        'body': 'ThailandIdcardRequestBody'
    }

    attribute_map = {
        'enterprise_project_id': 'Enterprise-Project-Id',
        'body': 'body'
    }

    def __init__(self, enterprise_project_id=None, body=None):
        """RecognizeThailandIdcardRequest

        The model defined in huaweicloud sdk

        :param enterprise_project_id: Enterprise project ID, used by the
            Enterprise Project Management Service (EPS) to split OCR billing
            across user groups and users.  Obtain it from the enterprise
            project detail page in the console (Enterprise-Project-Id).
            A wrong or missing ID does not fail the request; the usage is
            simply billed under the "unallocated" enterprise project.
        :type enterprise_project_id: str
        :param body: Body of the RecognizeThailandIdcardRequest
        :type body: :class:`huaweicloudsdkocr.v1.ThailandIdcardRequestBody`
        """

        self._enterprise_project_id = None
        self._body = None
        self.discriminator = None

        # Only assign through the property setters when a value was given.
        if enterprise_project_id is not None:
            self.enterprise_project_id = enterprise_project_id
        if body is not None:
            self.body = body

    @property
    def enterprise_project_id(self):
        """Gets the enterprise_project_id of this RecognizeThailandIdcardRequest.

        Enterprise project ID used for EPS billing separation (see
        __init__ for details).

        :return: The enterprise_project_id of this RecognizeThailandIdcardRequest.
        :rtype: str
        """
        return self._enterprise_project_id

    @enterprise_project_id.setter
    def enterprise_project_id(self, enterprise_project_id):
        """Sets the enterprise_project_id of this RecognizeThailandIdcardRequest.

        Enterprise project ID used for EPS billing separation (see
        __init__ for details).

        :param enterprise_project_id: The enterprise_project_id of this RecognizeThailandIdcardRequest.
        :type enterprise_project_id: str
        """
        self._enterprise_project_id = enterprise_project_id

    @property
    def body(self):
        """Gets the body of this RecognizeThailandIdcardRequest.

        :return: The body of this RecognizeThailandIdcardRequest.
        :rtype: :class:`huaweicloudsdkocr.v1.ThailandIdcardRequestBody`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this RecognizeThailandIdcardRequest.

        :param body: The body of this RecognizeThailandIdcardRequest.
        :type body: :class:`huaweicloudsdkocr.v1.ThailandIdcardRequestBody`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        # NOTE(review): relies on the third-party `simplejson` package and,
        # on Python 2, on the legacy reload()/setdefaultencoding hack.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RecognizeThailandIdcardRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
c4cf4c885cd3eb01d8f58c317d42c028c234ea00
|
2103809bc3df62449488328946d0914241ced863
|
/Project Specific Demo/TrackingInput.py
|
d24851795ed6c84c16062e6b02261e832b991cd9
|
[] |
no_license
|
PMiskew/Year9DesignCS4-PythonPM
|
b5fb4382091cd8be63a43371ac614be74fb4c268
|
eeeace0b0ff89e1e4a65361f4e40167e7f4c3cc4
|
refs/heads/master
| 2021-06-23T17:09:22.626668
| 2019-09-21T23:30:25
| 2019-09-21T23:30:25
| 148,173,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
import tkinter as tk
def submit():
    """Handle the Submit button: append the entry text to the list and update the label."""
    # NOTE(review): `list`, `ent` and `lab` are module-level globals
    # defined below; the name `list` shadows the builtin list type.
    print("Submit pressed")
    list.append(ent.get())
    print(list)
    lab.config(text = "Changed")
#This function will parse a string and
#add a new element to the list for all
#values

# creates an empty list
# NOTE(review): the name `list` shadows the builtin list type.
list = []

# Build a minimal Tk window: a prompt label, a text entry and a Submit
# button wired to the submit() handler above; mainloop() blocks until
# the window is closed.
root = tk.Tk()
lab = tk.Label(root, text = "Input Food")
lab.pack()
ent = tk.Entry(root)
ent.pack()
btn = tk.Button(root, text = "Submit", command = submit)
btn.pack()
root.mainloop()
|
[
"paul.miskew@gmail.com"
] |
paul.miskew@gmail.com
|
61b9db032c2ab839765d189ba8fcfa04f26494d8
|
6a6984544a4782e131510a81ed32cc0c545ab89c
|
/src/production-histograms/.svn/pristine/61/61b9db032c2ab839765d189ba8fcfa04f26494d8.svn-base
|
65cb661716719f6def08c48e49a24dc9e8215348
|
[] |
no_license
|
wardVD/IceSimV05
|
f342c035c900c0555fb301a501059c37057b5269
|
6ade23a2fd990694df4e81bed91f8d1fa1287d1f
|
refs/heads/master
| 2020-11-27T21:41:05.707538
| 2016-09-02T09:45:50
| 2016-09-02T09:45:50
| 67,210,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
from math import cos, log10, isnan
from I3Tray import I3Units
from icecube import icetray, dataclasses
from icecube.production_histograms.histograms.histogram import Histogram
from icecube.production_histograms.histogram_modules.histogram_module import HistogramModule
class CORSIKAWeightModule(HistogramModule):
    """Books and fills histograms of quantities from the CORSIKA weight map."""
    def __init__(self):
        HistogramModule.__init__(self)
        self.frame_key = "CorsikaWeightMap"
        # NOTE(review): the first three Histogram arguments (presumably the
        # binning: min, max, n_bins) are missing -- as written these two
        # lines are a syntax error and the binning must be filled in.
        self.append(Histogram(, , , "FluxSum"))
        self.append(Histogram(, , , "Weight"))

    def DAQ(self, frame):
        # Fill once per DAQ frame; skip frames without the weight map.
        if self.frame_key not in frame :
            return
        weight_dict = frame[self.frame_key]
        if "FluxSum" in weight_dict:
            self.histograms["FluxSum"].fill(weight_dict["FluxSum"])
        if "Weight" in weight_dict:
            self.histograms["Weight"].fill(weight_dict["Weight"])
|
[
"wardvandriessche@gmail.com"
] |
wardvandriessche@gmail.com
|
|
2efbf244a54953af90a9268a39fcbe2a09738c25
|
ac15eda44e8dcfee6dff62f514c5b98a3382f50d
|
/python/pygame/ftetris/lib/main.py
|
a645b4bdff5dcc5af6ca9c0ec63994b06f58037b
|
[] |
no_license
|
yangruihan/raspberrypi
|
5789c1a2e72d4012d46563d0644b08d032d346e6
|
22bc1a06b25e129a4314f4bc9cec5112affda136
|
refs/heads/master
| 2022-12-27T09:27:05.102020
| 2020-10-13T09:41:24
| 2020-10-13T09:41:24
| 32,977,936
| 4
| 0
| null | 2022-12-16T01:47:53
| 2015-03-27T09:30:43
|
Java
|
UTF-8
|
Python
| false
| false
| 374
|
py
|
import tetris
class Main:
    """Thin wrapper that creates and drives a tetris game instance."""

    def __init__(self, screen):
        self.screen = screen

    def run(self, elapse):
        """Advance the current game by `elapse`; returns its update result."""
        return self.tetris.update(elapse)

    def start(self, kind):
        """Create the game: kind 6 is plain Tetris, otherwise Tetris<kind>."""
        if kind == 6:
            self.tetris = tetris.Tetris(self.screen)
        else:
            # Look the variant class up by name instead of eval()-ing a
            # built source string: same behavior, no code-injection risk.
            self.tetris = getattr(tetris, "Tetris" + str(kind))(self.screen)
|
[
"yangruihan@vip.qq.com"
] |
yangruihan@vip.qq.com
|
5a8960b4e94e566305e39cc4e66dbd7bf77d4ab6
|
1dadb20cff6127dd950521aa5a747f6309eac7cd
|
/users/models.py
|
c3e6b289a6deca7d3547d180502d55e9938f8027
|
[] |
no_license
|
profmcdan/csu-core
|
a40ca42104728146e5951bdb2856c75d0090a96a
|
24091936f167fb03988da9226b76a90d1a6259d9
|
refs/heads/master
| 2022-12-12T12:15:06.106113
| 2019-11-17T13:04:09
| 2019-11-17T13:04:09
| 220,621,544
| 0
| 0
| null | 2022-12-08T06:51:12
| 2019-11-09T09:44:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,955
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates with the email address."""
    # Audit timestamps maintained automatically by Django.
    created_on = models.DateTimeField(
        auto_now_add=True,
        null=True,
    )
    modified_on = models.DateTimeField(
        auto_now=True,
        null=True,
    )
    # Set at deactivation time; NULL means the account is active.
    deactivated = models.DateTimeField(
        blank=True,
        null=True,
        verbose_name='Deactivated',
        help_text='Designates whether this user should be treated as deactivated',
    )
    # Optional profile fields.
    first_name = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        verbose_name='First Name',
    )
    last_name = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        verbose_name='Last Name',
    )
    middle_name = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        verbose_name='Middle Name',
    )
    # Unique login identifier (see USERNAME_FIELD below).
    email = models.EmailField(
        unique=True,
        max_length=50,
        verbose_name='Email',
    )
    phone = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        verbose_name='Phone Number',
    )
    is_staff = models.BooleanField(
        default=False,
        verbose_name='Is Staff',
        help_text='Designates whether the user can log into Django admin site',
    )
    # Tokens used by the password-reset and invitation flows.
    password_reset_token = models.CharField(
        max_length=50,
        blank=True,
        null=True,
        verbose_name='Password Reset Token',
    )
    invitation_token = models.CharField(
        max_length=50,
        blank=True,
        null=True,
        verbose_name='Invitation Token'
    )
    # Authenticate with the email address; there is no username field.
    USERNAME_FIELD = 'email'
    objects = UserManager()

    class Meta:
        get_latest_by = 'date_joined'

    def __str__(self):
        return self.email
|
[
"danielale9291@gmail.com"
] |
danielale9291@gmail.com
|
40a7027d6d97d67adb0811ae98b4be1e70558c8a
|
05a70c12df808455100598d8a6fdb5635c641ab8
|
/Ago-Dic-2019/Ejemplos/Design Patterns/Strategy/ShellSortStrategy.py
|
a79568c20a8f1ce41d829d0bea6a44c82658e388
|
[
"MIT"
] |
permissive
|
Jonathan-aguilar/DAS_Sistemas
|
991edcc929c33ba9bb8bc84e741b55c10a8420a3
|
4d02efc64161871084df1bff258112351e5d1241
|
refs/heads/development
| 2023-07-24T12:26:54.698452
| 2021-09-02T20:52:26
| 2021-09-02T20:52:26
| 289,764,892
| 1
| 0
|
MIT
| 2021-09-02T20:52:27
| 2020-08-23T20:54:55
|
Python
|
UTF-8
|
Python
| false
| false
| 192
|
py
|
from SortStrategy import SortStrategy
class ShellSortStrategy(SortStrategy):
    """Concrete strategy stub for Shell sort.

    (docstring replaces the old "docstring for ShellSortStrategy" filler)
    """

    def sort(self, my_list=None):
        """Sort `my_list` (placeholder implementation returning a message).

        The default was changed from the mutable `[]` to None to avoid the
        shared-mutable-default pitfall; the argument is currently unused,
        so callers relying on the old default see identical behavior.
        """
        if my_list is None:
            my_list = []
        return "Lista ordenada con Shell Sort!"
|
[
"anhell.death999@gmail.com"
] |
anhell.death999@gmail.com
|
9370579be77282af8b2b53e2f4fac9a305ab7f6d
|
825c73b9a0db8f65d948a127bd8ed772192f42a3
|
/tsai/models/RNN.py
|
138e27f0cfda1214a4fe9ca97f9c829c59e52f4c
|
[
"Apache-2.0"
] |
permissive
|
avimec13/tsai
|
d9dd5b7529554f238984ac82ab74cc41cc419b83
|
8ffa0afbcac6f886c3cb8310fa60d636becb1799
|
refs/heads/main
| 2023-06-15T16:34:44.215741
| 2021-07-17T19:34:32
| 2021-07-17T19:34:32
| 388,736,804
| 1
| 0
|
Apache-2.0
| 2021-07-23T08:47:26
| 2021-07-23T08:47:26
| null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/105_models.RNN.ipynb (unless otherwise specified).
__all__ = ['RNN', 'LSTM', 'GRU']
# Cell
from ..imports import *
from .layers import *
# Cell
class _RNN_Base(Module):
    # Shared implementation for the RNN/LSTM/GRU wrappers below; each
    # subclass only sets `_cell` to the torch recurrent module to use.
    # NOTE(review): there is no explicit super().__init__() call here --
    # presumably the fastai-style `Module` base handles initialization via
    # its metaclass; confirm against tsai's layers module.
    def __init__(self, c_in, c_out, hidden_size=100, n_layers=1, bias=True, rnn_dropout=0, bidirectional=False, fc_dropout=0.):
        self.rnn = self._cell(c_in, hidden_size, num_layers=n_layers, bias=bias, batch_first=True, dropout=rnn_dropout, bidirectional=bidirectional)
        # `noop` is used when no fc dropout is requested.
        self.dropout = nn.Dropout(fc_dropout) if fc_dropout else noop
        self.fc = nn.Linear(hidden_size * (1 + bidirectional), c_out)

    def forward(self, x):
        x = x.transpose(2,1)    # [batch_size x n_vars x seq_len] --> [batch_size x seq_len x n_vars]
        output, _ = self.rnn(x) # output from all sequence steps: [batch_size x seq_len x hidden_size * (1 + bidirectional)]
        output = output[:, -1]  # output from last sequence step : [batch_size x hidden_size * (1 + bidirectional)]
        output = self.fc(self.dropout(output))
        return output
# Public wrappers: each picks the torch recurrent cell used by _RNN_Base.
class RNN(_RNN_Base):
    _cell = nn.RNN

class LSTM(_RNN_Base):
    _cell = nn.LSTM

class GRU(_RNN_Base):
    _cell = nn.GRU
|
[
"“oguiza@gmail.com”"
] |
“oguiza@gmail.com”
|
8eecc943b82223f0ac6ee54536b2991412a8e8e1
|
af2e728ecbec0c55b183dbbfc9ee58f0da7ae1a5
|
/django/contrib/contenttypes/migrations/0002_remove_content_type_name.py
|
1b91437dbc5b33a0bc3cc8ac834231ca7fcea139
|
[
"BSD-3-Clause"
] |
permissive
|
mdiener21/django
|
797f366d506a590e4d9dd52d20bfa12436b0c183
|
c62791bfe4e9e18304bb125e5619c34d8afeb19f
|
refs/heads/master
| 2021-01-22T13:02:52.947589
| 2015-01-23T10:13:27
| 2015-01-23T10:13:27
| 29,727,259
| 1
| 0
| null | 2015-01-23T10:06:18
| 2015-01-23T10:06:18
| null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_legacy_name(apps, schema_editor):
    """Backfill ContentType.name for the reverse migration.

    Uses the model class's object_name when the model can still be
    resolved; falls back to the raw model identifier otherwise.
    """
    ContentType = apps.get_model('contenttypes', 'ContentType')
    for ct in ContentType.objects.all():
        try:
            ct.name = apps.get_model(ct.app_label, ct.model)._meta.object_name
        except Exception:
            # The model may have been removed since this content type was
            # recorded; keep the stored identifier.  (Was a bare `except:`,
            # which also swallowed SystemExit / KeyboardInterrupt.)
            ct.name = ct.model
        ct.save()
class Migration(migrations.Migration):
    # Removes the legacy ContentType.name column.  Forward: no-op data
    # step, then drop the field.  Backward: add_legacy_name (defined above
    # in this module) repopulates the column before the field reappears.

    dependencies = [
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='contenttype',
            options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
        ),
        # Make the column nullable so the reverse data migration can run
        # before values are filled in.
        migrations.AlterField(
            model_name='contenttype',
            name='name',
            field=models.CharField(max_length=100, null=True),
        ),
        # Forward is a no-op; reverse restores the legacy names.
        migrations.RunPython(
            migrations.RunPython.noop,
            add_legacy_name,
        ),
        migrations.RemoveField(
            model_name='contenttype',
            name='name',
        ),
    ]
|
[
"info@markusholtermann.eu"
] |
info@markusholtermann.eu
|
cce788f7d4808adac6a84342dfe4fc04fb0035c7
|
5e27c7f5426c169fd348b26e94b65c35f9cdc459
|
/tutorial/canvas/start3/workers/coloredtextbox_panda.py
|
c908c7a9b44518334e0eeb4234d8fe945f96a599
|
[
"BSD-2-Clause"
] |
permissive
|
agoose77/hivesystem
|
e2c9c27408233b5794151ca74f541d2e6063d58a
|
e1f55c5ea530a989477edb896dcd89f3926a31b8
|
refs/heads/master
| 2020-07-21T23:07:37.178856
| 2014-08-23T02:13:19
| 2014-08-23T02:13:19
| 20,776,359
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
# try to import Panda3D, but delay exceptions until the class is actually used
try:
from panda3d.core import NodePath, TextNode
import panda3d
except ImportError:
panda3d = None
#coloredtextbox class: this will be converted to a canvas drone using "build_canvasdrone"
class coloredtextbox(object):
    """Draws a text box (colored text on a colored card) on a Panda3D canvas."""
    #obligatory argument list for __init__: canvasdrone, object, identifier, parameters
    def __init__(self, canvasdrone, ctb, identifier, parameters):
        if panda3d is None: raise ImportError("Cannot locate Panda3D")
        from dragonfly.canvas import box2d
        if identifier is None: identifier = ""
        self.node = None
        # Resolve a parent NodePath for the requested 2D box, then draw.
        box = box2d(ctb.posx, ctb.posy, ctb.sizex, ctb.sizey, ctb.sizemode)
        self.pnode = canvasdrone._get_parent_nodepath(identifier, box)
        self._show(ctb, identifier)

    #obligatory method "update". Argument list: object, identifier, parameters
    def update(self, ctb, identifier, parameters):
        self._show(ctb, identifier)

    #obligatory method "remove"
    def remove(self):
        # Detach the parent node (and with it the text) from the scene graph.
        if self.pnode is not None:
            self.pnode.removeNode()
            self.pnode = None

    def _show(self, ctb, identifier):
        # Rebuild the TextNode from scratch on every show/update.
        if self.node is not None: self.node.removeNode()
        tnode = TextNode(identifier)
        tnode.setText(ctb.text)
        r, g, b, a = ctb.textcolor
        tnode.setTextColor(r, g, b, a)
        r, g, b, a = ctb.boxcolor
        # The "card" is the background rectangle drawn behind the text.
        tnode.setCardColor(r, g, b, a)
        tnode.setCardAsMargin(0, 0, 0, 0)
        tnode.setCardDecal(True)
        node = NodePath(tnode)
        self._scale(tnode, node)
        node.reparentTo(self.pnode)
        self.node = node

    def _scale(self, tnode, node):
        # Normalize the text extents so the node spans a unit box centered
        # at the parent origin; the z scale is negated -- presumably the
        # canvas y axis grows downward (NOTE(review): confirm against the
        # dragonfly.canvas conventions).
        top, bottom = tnode.getTop(), tnode.getBottom()
        l, r = tnode.getLeft(), tnode.getRight()
        w, h = r - l, top - bottom
        scalex = 0
        if w > 0: scalex = 1.0 / w
        scaley = 0
        if h > 0: scaley = 1.0 / h
        node.setScale(scalex, 1, -scaley)
        dimx = w * scalex
        midx = (l * scalex + r * scalex) / 2.0
        dimy = h * scaley
        midy = (top * scaley + bottom * scaley) / 2.0
        node.setPos(-midx + 0.5, 0, midy - 0.5)
import bee
from dragonfly.canvas import canvasdrone
from dragonfly.pandahive import build_canvasdrone
# Wrap the plain coloredtextbox class into a canvas drone so the hive
# system can instantiate it for "coloredtextbox" draw-show commands.
coloredtextbox_panda = build_canvasdrone(
    wrappedclass=coloredtextbox,
    classname="coloredtextbox_panda",
    drawshow="show",
    drawshowtype=("object", "coloredtextbox"),
    baseclass=canvasdrone
)
|
[
"goosey15@gmail.com"
] |
goosey15@gmail.com
|
52a3f85e87f9b45fb9ac69bf0e8ce6265b792311
|
933376c11498a6567da8d7eb7d2675100895c3ba
|
/pyzoo/zoo/tfpark/text/estimator/bert_classifier.py
|
1e804709fef7c112b4e4207be94bb878986e0527
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/analytics-zoo
|
320a461765f86d41dd456b598b1cf1d51d57f4c4
|
7cc3e2849057d6429d03b1af0db13caae57960a5
|
refs/heads/master
| 2023-08-13T20:47:58.621714
| 2023-07-06T00:49:11
| 2023-07-06T00:49:11
| 90,328,920
| 3,104
| 996
|
Apache-2.0
| 2023-09-06T01:51:18
| 2017-05-05T02:27:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,312
|
py
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.tfpark.text.estimator import *
def make_bert_classifier_model_fn(optimizer):
    """Build the tf.estimator model_fn used by BERTClassifier below.

    :param optimizer: tf.train.Optimizer used for training; may be None
        when only prediction/evaluation is needed.
    :return: a model_fn closure suitable for tf.estimator.
    """
    def _bert_classifier_model_fn(features, labels, mode, params):
        """
        Model function for BERTClassifier.

        :param features: Dict of feature tensors. Must include the key "input_ids".
        :param labels: Label tensor for training.
        :param mode: 'train', 'eval' or 'infer'.
        :param params: Must include the key "num_classes".
        :return: tf.estimator.EstimatorSpec.
        """
        import tensorflow as tf
        from zoo.tfpark import ZooOptimizer
        # Pooled [CLS] representation from the pre-trained BERT encoder.
        output_layer = bert_model(features, labels, mode, params).get_pooled_output()
        hidden_size = output_layer.shape[-1].value
        # Classification head: a single dense layer (weights + bias).
        output_weights = tf.get_variable(
            "output_weights", [params["num_classes"], hidden_size],
            initializer=tf.truncated_normal_initializer(stddev=0.02))
        output_bias = tf.get_variable(
            "output_bias", [params["num_classes"]], initializer=tf.zeros_initializer())
        with tf.variable_scope("loss"):
            if mode == tf.estimator.ModeKeys.TRAIN:
                # Dropout on the pooled output during training only.
                output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
            logits = tf.matmul(output_layer, output_weights, transpose_b=True)
            logits = tf.nn.bias_add(logits, output_bias)
            probabilities = tf.nn.softmax(logits, axis=-1)
            if mode == tf.estimator.ModeKeys.PREDICT:
                return tf.estimator.EstimatorSpec(mode=mode, predictions=probabilities)
            else:
                # Mean cross-entropy against one-hot labels.
                log_probs = tf.nn.log_softmax(logits, axis=-1)
                one_hot_labels = tf.one_hot(labels, depth=params["num_classes"], dtype=tf.float32)
                per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
                loss = tf.reduce_mean(per_example_loss)
                if mode == tf.estimator.ModeKeys.EVAL:
                    return tf.estimator.EstimatorSpec(mode=mode, predictions=probabilities,
                                                      loss=loss)
                else:
                    train_op = ZooOptimizer(optimizer).minimize(loss)
                    return tf.estimator.EstimatorSpec(mode=mode, train_op=train_op, loss=loss)
    return _bert_classifier_model_fn
class BERTClassifier(BERTBaseEstimator):
    """
    A pre-built TFEstimator that takes the hidden state of the first token of BERT
    to do classification.

    :param num_classes: Positive int. The number of classes to be classified.
    :param bert_config_file: The path to the json file for BERT configurations.
    :param init_checkpoint: The path to the initial checkpoint of the pre-trained BERT model if any.
                            Default is None.
    :param use_one_hot_embeddings: Boolean. Whether to use one-hot for word embeddings.
                                   Default is False.
    :param optimizer: The optimizer used to train the estimator. It should be an instance of
                      tf.train.Optimizer.
                      Default is None if no training is involved.
    :param model_dir: The output directory for model checkpoints to be written if any.
                      Default is None.
    """
    def __init__(self, num_classes, bert_config_file, init_checkpoint=None,
                 use_one_hot_embeddings=False, optimizer=None, model_dir=None):
        # All heavy lifting is delegated to BERTBaseEstimator with the
        # classifier-specific model_fn built by make_bert_classifier_model_fn.
        super(BERTClassifier, self).__init__(
            model_fn=make_bert_classifier_model_fn(optimizer),
            bert_config_file=bert_config_file,
            init_checkpoint=init_checkpoint,
            use_one_hot_embeddings=use_one_hot_embeddings,
            model_dir=model_dir,
            num_classes=num_classes)
|
[
"noreply@github.com"
] |
intel-analytics.noreply@github.com
|
73f56c7dd6bb05eaef9d510dce43223ccdce3a6e
|
25a362a695a33a2c5766d56974a541888ac6a783
|
/permeability/scripts/Analyze.py
|
b73d1fc9de7acd6904114fd27edc501922c0ece7
|
[
"MIT"
] |
permissive
|
ahy3nz/permeability
|
86f5137c73cc8df8755e4c560f297df6051e5b3a
|
809b92f15c0a8edb775f95910511079429c5e741
|
refs/heads/master
| 2020-12-14T08:48:00.434712
| 2018-02-27T22:53:25
| 2018-02-27T22:53:25
| 95,495,251
| 0
| 0
| null | 2018-02-15T23:04:51
| 2017-06-26T22:35:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
import os
import permeability as prm
import matplotlib.pyplot as plt
import pickle as pickle
from os import system
import pdb
import numpy as np
import mdtraj as mdt
# Permeability analysis driver: integrate per-sweep force ACFs into
# free-energy / diffusion / resistance profiles and a permeability
# estimate, writing all plots into the `folder/` prefix.
#data_dir = '/Users/rmhartkamp/Dropbox/PostDoc_2_Vanderbilt/Simulation/Permeability/DSPC_C12OH_3_1'
#data_dir = '/raid6/homes/ahy3nz/Trajectories/Data/11_DSPC_C18OH/DSPC-50_alc18-50_5-4a'
data_dir = os.getcwd()
n_sweeps = 26
#n_sweeps = 30
preamble = "folder"
# Per-sweep force ACF computation (timestep in fs; correlation_length in frames).
prm.analyze_sweeps(data_dir, timestep=1.0, verbosity=2, directory_prefix='sweep', n_sweeps=n_sweeps, correlation_length=300)
#prm.analyze_sweeps(data_dir, timestep=1000.0, verbosity=2, directory_prefix='sweep', n_sweeps=n_sweeps)
#forcetime = prm.force_timeseries(data_dir, timestep=2.0, n_windows=40, start_window=15, directory_prefix='sweep')
#prm.plot_timeseries(forcetime['time'], forcetime['forces'])
# Aggregate the sweeps at T = 305 K (kB in kcal/mol/K) and cache the result.
output = prm.analyze_force_acf_data(data_dir, 305.0, timestep=1, verbosity=2, directory_prefix='sweep',n_sweeps=n_sweeps, kB=1.987e-3)
pickle.dump(output, open('{}/output.p'.format(preamble), 'wb'))
#output = pickle.load(open('output.p', 'rb'))
#pdb.set_trace()
#system('rm *.pdf *.png')
# Plot the derived profiles and report the permeability estimate.
prm.plot_forces(output['z'], output['forces'], fig_filename='{}/forces.pdf'.format(preamble),sweep_alpha=0.2)
prm.plot_free_energy_z(output['z'], output['dG'], fig_filename='{}/delta_G.pdf'.format(preamble))
prm.plot_force_acfs_time(output['time'], output['facf_windows'], fig_filename="{}/force_acf.png".format(preamble), normalize=True)
prm.plot_int_acfs_time(output['time'], output['int_facf_windows'], fig_filename="{}/int-acf.png".format(preamble))
prm.plot_symmetrized_free_energy(output['z'], output['dG_sym'],
        output['dG_sym_err'],savefig=True, fig_filename="{}/delG-sym.pdf".format(preamble))
prm.plot_sym_diffusion_coefficient_z(output['z'], output['d_z_sym'],
        output['d_z_sym_err'],savefig=True, fig_filename="{}/d-sym_z.pdf".format(preamble))
prm.plot_resistance_z(output['z'], output['R_z'], output['R_z_err'], savefig=True, fig_filename="{}/R_z.pdf".format(preamble))
prm.plot_sym_exp_free_energy(output['z'], output['dG_sym'], output['dG_sym_err'], output['d_z_sym'],
        output['d_z_sym_err'], output['R_z'], output['R_z_err'], 305,
        fig_filename="{}/expdelG-sym.pdf".format(preamble))
print('Permeability (cm/sec): {} ({})'.format(output['permeability'], output['perm_err']))
#prm.plot_forces(output['z'], output['forces'], fig_filename='forces.pdf',sweep_alpha=0.2)
#prm.plot_free_energy_z(output['z'], output['dG'], fig_filename='delta_G.pdf')
#prm.plot_force_acfs_time(output['time'], output['facf_windows'], normalize=True)
#prm.plot_int_acfs_time(output['time'], output['int_facf_windows'])
#prm.plot_symmetrized_free_energy(output['z'], output['dG_sym'],
#        output['dG_sym_err'],savefig=True)
#prm.plot_sym_diffusion_coefficient_z(output['z'], output['d_z_sym'],
#        output['d_z_sym_err'],savefig=True)
#prm.plot_resistance_z(output['z'], output['R_z'], output['R_z_err'], savefig=True)
#prm.plot_sym_exp_free_energy(output['z'], output['dG_sym'], output['dG_sym_err'], output['d_z_sym'],
#        output['d_z_sym_err'], output['R_z'], output['R_z_err'], 305)
#print('Permeability (cm/sec): {} ({})'.format(output['permeability'], output['perm_err']))
##system('open -a preview *.pdf *.png')
|
[
"alexander.h.yang@vanderbilt.edu"
] |
alexander.h.yang@vanderbilt.edu
|
6aaba650193d59b002abe106baec671a25cc7974
|
99f43f4591f63d0c57cd07f07af28c0b554b8e90
|
/python/beckjun/백준_2169_로봇 조종하기_백트래킹.py
|
566123bd84b9a58d89a268e73e253affbd74684f
|
[] |
no_license
|
SINHOLEE/Algorithm
|
049fa139f89234dd626348c753d97484fab811a7
|
5f39d45e215c079862871636d8e0306d6c304f7e
|
refs/heads/master
| 2023-04-13T18:55:11.499413
| 2023-04-10T06:21:29
| 2023-04-10T06:21:29
| 199,813,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
from collections import deque
n, m = map(int, input().split())
mat = [list(map(int, input().split())) for _ in range(n)]
di = (0,0,1)
dj = (1,-1,0)
dp = [[[-1000000001] * 3 for _ in range(m)] for _ in range(n)]
q = deque([(mat[0][0], 0, 0, 0)])
dp[0][0][0] = max(dp[0][0][0], mat[0][0])
c = 0
while q:
cnt, x, y, pre_d = q.popleft()
for k in range(3):
c += 1
newX, newY = x+di[k], y+dj[k]
if pre_d^1 == k:
continue
if not (0 <= newX<n and 0<= newY<m):
continue
if dp[newX][newY][k] < cnt + mat[newX][newY]:
dp[newX][newY][k] = cnt + mat[newX][newY]
q.append((cnt + mat[newX][newY], newX, newY, k))
print(max(dp[n-1][m-1]),c)
|
[
"dltlsgh5@naver.com"
] |
dltlsgh5@naver.com
|
fb05e50ae82b67f2faf5468a91a946c9c47ed1ab
|
193b261c4e5a893a5798ade17dea172af0be8dd4
|
/tools/code_coverage/package/tool/parser/llvm_coverage_segment.py
|
17d7c18975ff94a6a24b56b5638cfea90b9a0633
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
ahirner/pytorch
|
616291823ddab5def3d35d5bd5693d62789bd710
|
fb620a27d08fc5ad00b386505e23e2a51f02366b
|
refs/heads/master
| 2021-07-25T14:19:44.053789
| 2021-06-10T19:25:58
| 2021-06-10T19:29:57
| 153,104,181
| 1
| 2
|
NOASSERTION
| 2021-06-12T06:35:46
| 2018-10-15T11:51:36
|
C++
|
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
from typing import List, NamedTuple, Optional, Tuple
class LlvmCoverageSegment(NamedTuple):
line: int
col: int
segment_count: int
has_count: int
is_region_entry: int
is_gap_entry: Optional[int]
@property
def has_coverage(self) -> bool:
return self.segment_count > 0
@property
def is_executable(self) -> bool:
return self.has_count > 0
def get_coverage(
self, prev_segment: "LlvmCoverageSegment"
) -> Tuple[List[int], List[int]]:
# Code adapted from testpilot.testinfra.runners.gtestcoveragerunner.py
if not prev_segment.is_executable:
return [], []
# this segment ends at the line if col == 1
# (so segment effectively ends on the line) and
# line+1 if col is > 1 (so it touches at least some part of last line).
end_of_segment = self.line if self.col == 1 else self.line + 1
lines_range = list(range(prev_segment.line, end_of_segment))
return (lines_range, []) if prev_segment.has_coverage else ([], lines_range)
def parse_segments(raw_segments: List[List[int]]) -> List[LlvmCoverageSegment]:
"""
Creates LlvmCoverageSegment from a list of lists in llvm export json.
each segment is represented by 5-element array.
"""
ret: List[LlvmCoverageSegment] = []
for raw_segment in raw_segments:
assert (
len(raw_segment) == 5 or len(raw_segment) == 6
), "list is not compatible with llvmcom export:"
" Expected to have 5 or 6 elements"
if len(raw_segment) == 5:
ret.append(
LlvmCoverageSegment(
raw_segment[0],
raw_segment[1],
raw_segment[2],
raw_segment[3],
raw_segment[4],
None,
)
)
else:
ret.append(LlvmCoverageSegment(*raw_segment))
return ret
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
16f59fda29cf8113314e0316016f0a0ce62a268b
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4042/149004042.py
|
a1fcdf274ced8b3da938a656143708a70d73b544
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,788
|
py
|
from bots.botsconfig import *
from records004042 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'NT',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99999},
{ID: 'TDS', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
]},
{ID: 'TFS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'FGS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'TIA', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'PCT', MIN: 0, MAX: 99999},
{ID: 'AMT', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99999},
]},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
8b15c70ba21396494d75e7deb3f8b022da626582
|
1403d8670aa2e3ad6b72524688ca6318a84cd1f2
|
/Chapter 12/nest_egg_mcs_1st_5yrs.py
|
a61e2eff6343367a5e2e3d49f1a142488cc3f307
|
[] |
no_license
|
afettouhi/ImpracticalPythonProjects-py38
|
033037a9b9b4af1401b968fa6f51fa0adde8be3f
|
074b9f8b77d72cac2bb33f57a918c509a4f0ef17
|
refs/heads/master
| 2023-01-07T08:05:42.849002
| 2020-11-02T06:12:58
| 2020-11-02T06:12:58
| 304,799,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,641
|
py
|
"""
Retirement nest egg calculator using Monte Carlo simulation.
"""
import sys
import random
import matplotlib.pyplot as plt
def read_to_list(file_name):
"""
Open a file of data in percent, convert to decimal & return a list.
"""
with open(file_name) as in_file:
lines = [float(line.strip()) for line in in_file]
decimal = [round(line / 100, 5) for line in lines]
return decimal
def default_input(prompt, default=None):
"""
Allow use of default values in input
"""
prompt = '{} [{}]: '.format(prompt, default)
response = input(prompt)
if not response and default:
return default
else:
return response
# load data files with original data in percent form
print("\nNote: Input data should be in percent, not decimal!\n")
try:
bonds = read_to_list('../data/10-yr_TBond_returns_1926-2013_pct.txt')
stocks = read_to_list('../data/SP500_returns_1926-2013_pct.txt')
blend_40_50_10 = read_to_list('../data/S-B-C_blend_1926-2013_pct.txt')
blend_50_50 = read_to_list('../data/S-B_blend_1926-2013_pct.txt')
infl_rate = read_to_list('../data/annual_infl_rate_1926-2013_pct.txt')
except IOError as e:
print("{}. \nTerminating program.".format(e), file=sys.stderr)
sys.exit(1)
# get user input; use dictionary for investment-type arguments
investment_type_args = {'bonds': bonds, 'stocks': stocks,
'sb_blend': blend_50_50, 'sbc_blend': blend_40_50_10}
# print input legend for user
print(" stocks = SP500")
print(" bonds = 10-yr Treasury Bond")
print(" sb_blend = 50% SP500/50% TBond")
print("sbc_blend = 40% SP500/50% TBond/10% Cash\n")
print("Press ENTER to take default value shown in [brackets]. \n")
# get user input
invest_type = default_input("Enter investment type: (stocks, bonds, sb_blend," \
" sbc_blend): \n", 'bonds').lower()
while invest_type not in investment_type_args:
invest_type = input("Invalid investment. Enter investment type " \
"as listed in prompt: ")
start_value = default_input("Input starting value of investments: \n", \
'2000000')
while not start_value.isdigit():
start_value = input("Invalid input! Input integer only: ")
withdrawal_1 = default_input("Input annual pre-tax withdrawal for " \
"first 5 yrs(today's $): \n", '100000')
while not withdrawal_1.isdigit():
withdrawal_1 = input("Invalid input! Input integer only: ")
withdrawal_2 = default_input("Input annual pre-tax withdrawal for " \
"remainder (today's $): \n", '80000')
while not withdrawal_2.isdigit():
withdrawal_2 = input("Invalid input! Input integer only: ")
min_years = default_input("Input minimum years in retirement: \n", '18')
while not min_years.isdigit():
min_years = input("Invalid input! Input integer only: ")
most_likely_years = default_input("Input most-likely years in retirement: \n",
'25')
while not most_likely_years.isdigit():
most_likely_years = input("Invalid input! Input integer only: ")
max_years = default_input("Input maximum years in retirement: \n", '40')
while not max_years.isdigit():
max_years = input("Invalid input! Input integer only: ")
num_cases = default_input("Input number of cases to run: \n", '50000')
while not num_cases.isdigit():
num_cases = input("Invalid input! Input integer only: ")
# check for other erroneous input
if not int(min_years) < int(most_likely_years) < int(max_years) \
or int(max_years) > 99:
print("\nProblem with input years.", file=sys.stderr)
print("Requires Min < ML < Max & Max <= 99.", file=sys.stderr)
sys.exit(1)
def montecarlo(returns):
"""
Run MCS & return investment value at death & and # of times bankrupt.
"""
case_count = 0
bankrupt_count = 0
outcome = []
while case_count < int(num_cases):
investments = int(start_value)
start_year = random.randrange(0, len(returns))
duration = int(random.triangular(int(min_years), int(max_years),
int(most_likely_years)))
end_year = start_year + duration
lifespan = [i for i in range(start_year, end_year)]
bankrupt = 'no'
# build temporary lists for each case
lifespan_returns = []
lifespan_infl = []
for i in lifespan:
lifespan_returns.append(returns[i % len(returns)])
lifespan_infl.append(infl_rate[i % len(infl_rate)])
# loop through each year of retirement for each case run
for index, i in enumerate(lifespan_returns):
infl = lifespan_infl[index]
# don't adjust for inflation the first year
if index == 0:
withdraw_infl_adj_1 = int(withdrawal_1)
withdraw_infl_adj_2 = int(withdrawal_2)
else:
withdraw_infl_adj_1 = int(withdraw_infl_adj_1 * (1 + infl))
withdraw_infl_adj_2 = int(withdraw_infl_adj_2 * (1 + infl))
if index < 5:
withdraw_infl_adj = withdraw_infl_adj_1
else:
withdraw_infl_adj = withdraw_infl_adj_2
investments -= withdraw_infl_adj
investments = int(investments * (1 + i))
if investments <= 0:
bankrupt = 'yes'
break
if bankrupt == 'yes':
outcome.append(0)
bankrupt_count += 1
else:
outcome.append(investments)
case_count += 1
return outcome, bankrupt_count
def bankrupt_prob(outcome, bankrupt_count):
"""
Calculate & return chance of running out of money & print statistics.
"""
total = len(outcome)
odds = round(100 * bankrupt_count / total, 1)
print("\nInvestment type: {}".format(invest_type))
print("Starting value: ${:,}".format(int(start_value)))
print("Annual withdrawal first 5 yrs: ${:,}".format(int(withdrawal_1)))
print("Annual withdrawal after 5 yrs: ${:,}".format(int(withdrawal_2)))
print("Years in retirement (min-ml-max): {}-{}-{}"
.format(min_years, most_likely_years, max_years))
print("Number of runs: {:,}\n".format(len(outcome)))
print("Odds of running out of money: {}%\n".format(odds))
print("Average outcome: ${:,}".format(int(sum(outcome) / total)))
print("Minimum outcome: ${:,}".format(min(i for i in outcome)))
print("Maximum outcome: ${:,}".format(max(i for i in outcome)))
return odds
def main():
"""
Run the program and draw bar chart of results.
"""
outcome, bankrupt_count = montecarlo(investment_type_args[invest_type])
odds = bankrupt_prob(outcome, bankrupt_count)
# generate matplotlib bar chart
plotdata = outcome[:3000] # only plot first 3000 runs
plt.figure('Outcome by Case (showing first {} runs)'.format(len(plotdata)),
figsize=(16, 5)) # size is width, height in inches
index = [i + 1 for i in range(len(plotdata))]
plt.bar(index, plotdata, color='black')
plt.xlabel('Simulated Lives', fontsize=18)
plt.ylabel('$ Remaining', fontsize=18)
plt.ticklabel_format(style='plain', axis='y')
ax = plt.gca()
ax.get_yaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}"
.format(int(x))))
plt.title('Probability of running out of money = {}%'.format(odds),
fontsize=20, color='red')
plt.show()
# run program
if __name__ == '__main__':
main()
|
[
"A.Fettouhi@gmail.com"
] |
A.Fettouhi@gmail.com
|
25f0f93e00812237893b74b780d979cce30a1d81
|
5a1eea357eb4fc0c5a1d1478e69ff7779b8a686a
|
/test/view_helpers/test_Maps_Views.py
|
34f1185e2d51515ee91331a2041ac2d85e7a2bc9
|
[
"Apache-2.0"
] |
permissive
|
dariovillalon/OSBot-browser
|
77059951bb0d2bc6bae13559e17fc84f5268d837
|
bcb7e9f4b8c6980afb32d9ba1299c20fc744bf9d
|
refs/heads/master
| 2021-05-18T20:05:01.597025
| 2020-03-29T23:17:27
| 2020-03-29T23:17:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
from unittest import TestCase
from osbot_aws.apis.Lambda import Lambda
from pbx_gs_python_utils.utils.Dev import Dev
from osbot_browser.browser.Browser_Lamdba_Helper import Browser_Lamdba_Helper
from osbot_browser.view_helpers.Maps_Views import Maps_Views
from gw_bot.Deploy import Deploy
class Test_Maps_Views(TestCase):
def setUp(self):
Deploy().setup() # set local ossbot environment
self.maps_views = Maps_Views()
self.png_data = None
self.result = None
def tearDown(self):
if self.result:
Dev.pprint(self.result)
if self.png_data:
Browser_Lamdba_Helper().save_png_data(self.png_data)
def test_default(self):
self.result = self.maps_views.default(headless=False)#,channel='DJ8UA0RFT')
def test_exec_js(self):
channel = 'DJ8UA0RFT'
channel = None
params = ["maps.add_component('aaa 123' , 2, 1)"]
self.result = self.maps_views.exec_js(headless=False ,channel=channel, params=params)
def test_via_lambda_execution(self):
self.test_update_lambda()
view = 'default'
code = ''
aws_lambda = Lambda('osbot_browser.lambdas.lambda_browser')
payload = {"params": ["maps", view, code],
'data': { 'channel' : 'DJ8UA0RFT'}}
self.result = aws_lambda.invoke(payload)
def test_via_lambda_execution__version(self):
self.test_update_lambda()
aws_lambda = Lambda('osbot_browser.lambdas.lambda_browser')
payload = {"params": ["maps", "version"],'data': {}}
self.result = aws_lambda.invoke(payload)
def test_update_lambda_browser(self):
Deploy().setup().deploy_lambda__browser()
def test_update_lambda_oss_bot(self):
Deploy().setup().deploy_lambda__gw_bot()
|
[
"dinis.cruz@owasp.org"
] |
dinis.cruz@owasp.org
|
e6f1611a6eb8c993d49fa2bcc2526dd8bfc0c6dc
|
6d4e31ef0e828db01775dbb22a43b11680f86059
|
/mysql_proto/plugins/plugin.py
|
ba82307a5223e7938bcaaa46ecdaa8922781fd69
|
[
"MIT"
] |
permissive
|
alvinzane/PyMP
|
a81a1e07a962604f96d5c68ef89cbf9947cbbccf
|
cfe426dbca4afae95714d6026903d2678a46ddc4
|
refs/heads/master
| 2020-04-07T20:12:49.004874
| 2019-01-05T15:18:43
| 2019-01-05T15:18:43
| 158,679,588
| 0
| 0
| null | 2018-11-22T10:08:29
| 2018-11-22T10:08:29
| null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
# coding=utf-8
class Plugin(object):
def init(self, context):
raise NotImplementedError()
def read_handshake(self, context):
raise NotImplementedError()
def send_handshake(self, context):
raise NotImplementedError()
def read_auth(self, context):
raise NotImplementedError()
def send_auth(self, context):
raise NotImplementedError()
def read_auth_result(self, context):
raise NotImplementedError()
def send_auth_result(self, context):
raise NotImplementedError()
def read_query(self, context):
raise NotImplementedError()
def send_query(self, context):
raise NotImplementedError()
def read_query_result(self, context):
raise NotImplementedError()
def send_query_result(self, context):
raise NotImplementedError()
def cleanup(self, context):
raise NotImplementedError()
def shutdown(self, context):
raise NotImplementedError()
|
[
"kormoc@gmail.com"
] |
kormoc@gmail.com
|
594a901265ccc41397cbed360de2d2e85a9e531b
|
91da8a59561d6f2c7852c0548298434e0ede2ac7
|
/Tree/Construct_Binary_Tree_from_Preorder_and_Postorder_Traversal.py
|
2c68547cd8733c752bd84ee60f23b160ede1ed3f
|
[] |
no_license
|
prashant97sikarwar/leetcode
|
6d3828772cc426ccf53dad07edb1efbc2f1e1ded
|
e76054e27a5d4493bd1bcef2ebdeb21d257afb63
|
refs/heads/master
| 2023-08-23T05:06:23.181869
| 2021-10-28T18:19:10
| 2021-10-28T18:19:10
| 286,057,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
#Problem Link :- https://leetcode.com/problems/construct-binary-tree-from-preorder-and-postorder-traversal/
"""Return any binary tree that matches the given preorder and postorder traversals.
Values in the traversals pre and post are distinct positive integers."""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def constructFromPrePost(self, pre,post):
if not pre:
return
root = TreeNode(pre[0])
if len(pre) == 1:
return root
L = post.index(pre[1]) + 1
root.left = self.constructFromPrePost(pre[1:L+1],post[:L])
root.right = self.constructFromPrePost(pre[L+1:],post[L:-1])
return root
|
[
"prashant97sikarwar@gmail.com"
] |
prashant97sikarwar@gmail.com
|
8ede1ff4e6d2f58b5266da7f5c33d2592bcb46ca
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=29/params.py
|
8755b15b27c0b917e2860a10c7ce53504eaaa02b
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.547667',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 29,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
391dd31b81e74cd0e0f234e71ed4001eaee94ad6
|
0478abafc05f1dd55ddf6054d95fef73e9fa03e9
|
/quati/features.py
|
30821e305cafde7bfdb2a894b5c9975c70a85fb5
|
[
"MIT"
] |
permissive
|
deep-spin/quati
|
89bce0868b36b0d7902659507b72acfbd01ada98
|
62a6769475090182fe2990b2864d66f8e2081a32
|
refs/heads/master
| 2023-03-12T09:22:31.520259
| 2021-03-02T15:13:22
| 2021-03-02T15:13:22
| 330,678,540
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,683
|
py
|
"""Useful for sequence tagging tasks like POS or NER."""
from quati import opts
from quati.dataset.fields.affixes import AffixesField
from quati.dataset.fields.caps import CapsField
def build(options):
prefixes_field = AffixesField()
suffixes_field = AffixesField()
caps_field = CapsField()
fields_tuples = []
if options.use_prefixes:
fields_tuples.append(('prefixes', prefixes_field))
if options.use_suffixes:
fields_tuples.append(('suffixes', suffixes_field))
if options.use_caps:
fields_tuples.append(('caps', caps_field))
return fields_tuples
def load(path):
options = opts.load(path)
return build(options)
class Caps:
all_upper = 'UPPER' # acronyms
all_lower = 'LOWER' # normal words
first_upper = 'FIRST' # names, titles
non_alpha = 'NON_ALPHA' # dates, hours, punctuations
other = 'OTHER' # any other
def extract_prefixes(words, min_length, max_length):
return extract_affixes(words, min_length, max_length, affix_type='prefix')
def extract_suffixes(words, min_length, max_length):
return extract_affixes(words, min_length, max_length, affix_type='suffix')
def extract_affixes(words, min_length, max_length, affix_type='prefix'):
total_length = max_length - min_length + 1
pad_token = '<pad-{}>'.format(affix_type)
def fill_with_pad(v):
for _ in range(total_length - len(v)):
v.append(pad_token)
new_words = []
for sentence in words:
tokens = sentence.split()
affixes_tokens = []
for token in tokens:
affixes = []
if len(token) >= min_length:
i, j = min_length, min(max_length, len(token))
for k in range(i, j + 1):
affix = token[:k] if affix_type == 'prefix' else token[-k:]
affixes.append(affix)
fill_with_pad(affixes)
affixes_tokens.extend(affixes)
new_words.append(' '.join(affixes_tokens))
return new_words
def extract_caps(words):
new_words = []
for sentence in words:
tokens = sentence.split()
caps_tokens = []
for token in tokens:
if not token.isalpha():
caps_tokens.append(Caps.non_alpha)
elif token.isupper():
caps_tokens.append(Caps.all_upper)
elif token.islower():
caps_tokens.append(Caps.all_lower)
elif token[0].isupper() and token[1:].islower():
caps_tokens.append(Caps.first_upper)
else:
caps_tokens.append(Caps.other)
new_words.append(caps_tokens)
return new_words
|
[
"marcosvtreviso@gmail.com"
] |
marcosvtreviso@gmail.com
|
80d6da4ad8540d5da3fd49cc9e8e50e91ea0b833
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03673/s956353086.py
|
bb1c4270936c6d0fa8f2322285846a5e4b4a2c72
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
n = int(input())
a = list(map(int,input().rstrip().split(" ")))
b1 = []
b2 = []
ans = []
if n % 2 == 0:
for i in range(n):
if i % 2 == 0:
b1.append(a[i])
else:
b2.append(a[i])
b2.reverse()
ans = b2 + b1
print(" ".join(str(i) for i in ans))
else:
for i in range(n):
if i % 2 == 0:
b1.append(a[i])
else:
b2.append(a[i])
b1.reverse()
ans = b1 + b2
print(" ".join(str(i) for i in ans))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7a98373803beb4dd79354dd8d95b1d9369c097d6
|
9eec6ca0e14c50298d0ecaa6e2f75b8a7c0f76c4
|
/resolwe/flow/migrations/0015_refactor_relations_1.py
|
96ddbc7acd5f125e651bdcc17c9c5d09d0c47f30
|
[
"Apache-2.0"
] |
permissive
|
mzagmajster/resolwe
|
81e65fca94bd14c59b6da718e2f6c4c0b41481b1
|
da371a3ec0260a45ccab848704c6a339a0de79cc
|
refs/heads/master
| 2022-02-20T05:23:41.953200
| 2019-08-27T16:36:25
| 2019-08-27T16:36:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,768
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-06 02:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('flow', '0014_track_resources'),
]
operations = [
migrations.RenameModel(
old_name='PositionInRelation',
new_name='RelationPartition',
),
migrations.RenameField(
model_name='relation',
old_name='label',
new_name='category',
),
migrations.AlterField(
model_name='relation',
name='category',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='relation',
name='unit',
field=models.CharField(blank=True, choices=[('s', 'Second'), ('min', 'Minute'), ('hr', 'Hour'), ('d', 'Day'), ('wk', 'Week')], max_length=3, null=True),
),
migrations.AlterField(
model_name='relation',
name='collection',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flow.Collection'),
),
migrations.AlterUniqueTogether(
name='relation',
unique_together=set([('collection', 'category')]),
),
migrations.AddField(
model_name='relationpartition',
name='label',
field=models.CharField(blank=True, db_index=True, max_length=30, null=True),
),
migrations.AlterUniqueTogether(
name='relationpartition',
unique_together=set([('entity', 'relation')]),
),
]
|
[
"domen@blenkus.com"
] |
domen@blenkus.com
|
e9e391dcd37dfd9a072ffaaf80a20be09106b74f
|
8578ae5be776b49559fa95ce30f6b45b6a82b73a
|
/test/functional/feature_filelock.py
|
c25d6215ebf2c07361b684e43b155e7f75109040
|
[
"MIT"
] |
permissive
|
devcoin/core
|
3f9f177bd9d5d2cc54ff95a981cfe88671206ae2
|
f67e8b058b4316dd491615dc3f8799a45f396f4a
|
refs/heads/master
| 2023-05-25T03:42:03.998451
| 2023-05-24T07:59:22
| 2023-05-24T08:02:14
| 21,529,485
| 16
| 13
|
MIT
| 2022-01-07T17:04:18
| 2014-07-05T22:42:13
|
C
|
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check that it's not possible to start a second devcoind instance using the same datadir or wallet."""
import os
import random
import string
from test_framework.test_framework import DevcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(DevcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=None)
self.nodes[0].start()
self.nodes[0].wait_for_rpc_connection()
def run_test(self):
datadir = os.path.join(self.nodes[0].datadir, self.chain)
self.log.info("Using datadir {}".format(datadir))
self.log.info("Check that we can't start a second devcoind instance using the same datadir")
expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)
if self.is_wallet_compiled():
def check_wallet_filelock(descriptors):
wallet_name = ''.join([random.choice(string.ascii_lowercase) for _ in range(6)])
self.nodes[0].createwallet(wallet_name=wallet_name, descriptors=descriptors)
wallet_dir = os.path.join(datadir, 'wallets')
self.log.info("Check that we can't start a second devcoind instance using the same wallet")
if descriptors:
expected_msg = "Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another devcoind?"
else:
expected_msg = "Error: Error initializing wallet database environment"
self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if self.is_bdb_compiled():
check_wallet_filelock(False)
if self.is_sqlite_compiled():
check_wallet_filelock(True)
if __name__ == '__main__':
FilelockTest().main()
|
[
"fernando@develcuy.com"
] |
fernando@develcuy.com
|
e8b4f296e257361089611d6c61f6e971a91144f8
|
4feaf520374804d6f3feebe3700fb448692a44ba
|
/pullenti/ner/booklink/internal/EpNerBooklinkInternalResourceHelper.py
|
903e0b5ab200e9414e8adbf15e7a5c0b1f55b3a4
|
[] |
no_license
|
MihaJjDa/APCLtask
|
f7be3fb6b0f31801196bf779f6a7e62ce245493b
|
4745b45e199887d433ab256bb2e2ebf5dbe3f7cd
|
refs/heads/master
| 2020-04-16T17:15:10.846647
| 2020-02-24T16:06:43
| 2020-02-24T16:06:43
| 165,769,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
# Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
import io
from pullenti.unisharp.Utils import Utils
class EpNerBooklinkInternalResourceHelper:
""" Это для поддержки получения встроенных ресурсов """
@staticmethod
def getBytes(name : str) -> bytearray:
""" Получить встроенный ресурс
Args:
name(str): имя, на который оканчивается ресурс
"""
# ignored: assembly = EpNerBooklinkInternalResourceHelper.
names = Utils.getResourcesNames('pullenti.ner.booklink.properties', '.png')
for n in names:
if (Utils.endsWithString(n, name, True)):
try:
inf = Utils.getResourceInfo('pullenti.ner.booklink.properties', n)
if (inf is None):
continue
with Utils.getResourceStream('pullenti.ner.booklink.properties', n) as stream:
buf = Utils.newArrayOfBytes(Utils.getLengthIO(stream), 0)
Utils.readIO(stream, buf, 0, len(buf))
return buf
except Exception as ex:
pass
return None
@staticmethod
def getString(name : str) -> str:
arr = EpNerBooklinkInternalResourceHelper.getBytes(name)
if (arr is None):
return None
if ((len(arr) > 3 and arr[0] == (0xEF) and arr[1] == (0xBB)) and arr[2] == (0xBF)):
return arr[3:3+len(arr) - 3].decode("UTF-8", 'ignore')
else:
return arr.decode("UTF-8", 'ignore')
|
[
"danila.puchkin@mail.ru"
] |
danila.puchkin@mail.ru
|
89525c39e243900cdcdb007d0a3d31922db4db1d
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/ml/azure-ai-ml/azure/ai/ml/_schema/_feature_set/feature_set_specification_schema.py
|
761c250e8023cf2bbbee0291d9fdcfb5aa45d48e
|
[
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=unused-argument,no-self-use
from marshmallow import fields, post_load
from azure.ai.ml._schema.core.schema import PatchedSchemaMeta
class FeatureSetSpecificationSchema(metaclass=PatchedSchemaMeta):
    """Marshmallow schema for a feature-set specification: a single required ``path``."""
    path = fields.Str(required=True, allow_none=False)
    @post_load
    def make(self, data, **kwargs):
        """Build the SDK entity from the validated field dict.

        The import is deferred — presumably to avoid a circular import between
        the schema and entity modules (TODO confirm).
        """
        from azure.ai.ml.entities._feature_set.feature_set_specification import FeatureSetSpecification
        return FeatureSetSpecification(**data)
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
d3f2ade7125fbc5347a5c7961cb691e2c8b5d211
|
a5826609647289ca4c3406a99114da0abacacc2f
|
/vycontrol/config/models.py
|
010b353f1efba2c54df058bcac17af6d89cad9c8
|
[
"MIT"
] |
permissive
|
Akmon47/vycontrol
|
a18f50dcaeaf350525858b20bd33e4d6ac2ee7a0
|
898c2ec0e5d315f89c37f07e87a6bc1b096b0e8e
|
refs/heads/master
| 2023-03-29T16:42:03.361002
| 2021-04-05T07:38:52
| 2021-04-05T07:38:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
from django.db import models
from django.contrib.auth.models import Group
class Instance(models.Model):
    """A managed device instance reachable over its HTTP(S) API."""
    # The hostname doubles as the primary key, so it must be unique.
    hostname = models.CharField(max_length=120, primary_key=True)
    alias = models.CharField(max_length=30)
    port = models.IntegerField()
    # API key used to authenticate against the instance.
    key = models.CharField(max_length=100)
    https = models.BooleanField()
    # main: presumably marks the default/primary instance — TODO confirm usage.
    main = models.BooleanField(default=False)
    # Deleting the Group keeps the instance but detaches it (SET_NULL).
    group = models.ForeignKey(Group, null=True, on_delete=models.SET_NULL)
# Monkey-patch an `active` flag onto Django's built-in Group model.
Group.add_to_class('active', models.BooleanField(default=True))
|
[
"roberto.berto@gmail.com"
] |
roberto.berto@gmail.com
|
fe0dca761fc20048bba333252839b707f4c21d9d
|
1cf380b819a399c3f58a7ad13f5daeb5659cead3
|
/wrf_management/modules/compress_cli.py
|
ddd637fdc060ad1c63957c75b5fa0d272afc416e
|
[] |
no_license
|
daliagachc/wrf_management
|
dd88cf5d6279457f4e2b414acfa0d0cbaaad3873
|
4ee88c668ed0252e68713aa756b74344ecada615
|
refs/heads/master
| 2021-06-13T09:39:08.477315
| 2021-04-09T14:43:21
| 2021-04-09T14:43:21
| 171,271,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
# project name: wrf_management
# created by diego aliaga daliaga_at_chacaltaya.edu.bo
# todo still under construction
import sys
import wrf_management.modules.CompressOut as CO
import os
# The zip/log output directory lives next to the source file given in argv[1].
# NOTE(review): this runs at import time, so importing this module without a
# CLI argument raises IndexError — confirm it is only ever run as a script.
zip_path = os.path.join(os.path.dirname(sys.argv[1]),'compresser_log_xxx')
input_dic = dict(
    source_path = sys.argv[1],
    zip_path = zip_path,
    # Each run gets its own sqlite bookkeeping DB, keyed by a unique id.
    db_path = os.path.join(zip_path,f'zip{CO.get_unique_id()}.sqlite'),
    lock_last_date = False,
    source_path_is_file = True,
    compress_level_target = 4
)
if __name__=="__main__":
    co = CO.Compresser(**input_dic)
    # Compress the next pending row; move=True relocates the original afterwards.
    co.get_and_zip_next_row(move=True)
|
[
"diego.aliaga@helsinki.fi"
] |
diego.aliaga@helsinki.fi
|
dc44b2f609558f860a5b9afec10dfdf9fedc3d50
|
78b7b3e27553ccf0b89c24cbd11662600db26b4c
|
/ScrapeNASAPicDayWebsite/.history/scraper_20190701160044.py
|
59d3cf70adc9b86d844ece27249a8b2220f9b34b
|
[] |
no_license
|
web3-qa/intermediatePython
|
2c23408bd6d6dffc070b92e1155d3c072cfe040c
|
b4791db2bcb59aaf9c447cf50ffd4d21cacbe16b
|
refs/heads/master
| 2023-02-08T14:18:54.288227
| 2019-07-18T13:31:23
| 2019-07-18T13:31:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
import urllib.request
# Fix: the original line `import urllib.parse import urljoin` was a syntax
# error; `urljoin` comes from urllib.parse via a `from` import.
from urllib.parse import urljoin
from bs4 import BeautifulSoup

# Fetch the NASA APOD archive index and parse out all anchor tags.
# (urljoin is imported for resolving the relative hrefs found here.)
baseURL = "http://apod.nasa.gov/apod/archivepix.html"
content = urllib.request.urlopen(baseURL).read()
BeautifulSoup(content, "lxml").findAll("a")
|
[
"dcolmer@statestreet.com"
] |
dcolmer@statestreet.com
|
4646d73a7577d2a17ca8a2c885d77ab9eac866e3
|
3670f2ca6f5609e14cce8c31cb1348052d0b6358
|
/xacro/geometry_tutorials/turtle_tf2/nodes/dynamic_tf2_broadcaster.py
|
19ed163e4729d54006864bd9232a374a93da9573
|
[] |
no_license
|
jincheng-ai/ros-melodic-python3-opencv4
|
b0f4d3860ab7ae3d683ade8aa03e74341eff7fcf
|
47c74188560c2274b8304647722d0c9763299a4b
|
refs/heads/main
| 2023-05-28T17:37:34.345164
| 2021-06-17T09:59:25
| 2021-06-17T09:59:25
| 377,856,153
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,038
|
py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import tf2_ros
import geometry_msgs.msg
import math
# NOTE(review): '__ STATIC main__' can never equal __name__, so this first
# block (a fixed "carrot1" offset broadcaster) is dead code — it looks
# deliberately disabled in favour of the dynamic version below; confirm and
# consider removing it.
if __name__ == '__ STATIC main__':
    rospy.init_node('my_tf2_broadcaster')
    br = tf2_ros.TransformBroadcaster()
    t = geometry_msgs.msg.TransformStamped()
    # Static offset: "carrot1" fixed 2m along +y of "turtle1".
    t.header.frame_id = "turtle1"
    t.child_frame_id = "carrot1"
    t.transform.translation.x = 0.0
    t.transform.translation.y = 2.0
    t.transform.translation.z = 0.0
    t.transform.rotation.x = 0.0
    t.transform.rotation.y = 0.0
    t.transform.rotation.z = 0.0
    t.transform.rotation.w = 1.0
    rate = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        t.header.stamp = rospy.Time.now()
        br.sendTransform(t)
        rate.sleep()
# Active entry point: broadcast "carrot1" circling "turtle1" at 10 Hz.
if __name__ == '__main__':
    rospy.init_node('my_tf2_broadcaster')
    br = tf2_ros.TransformBroadcaster()
    t = geometry_msgs.msg.TransformStamped()
    t.header.frame_id = "turtle1"
    t.child_frame_id = "carrot1"
    rate = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        # Phase advances with wall time, so the carrot orbits (radius 10).
        x = rospy.Time.now().to_sec() * math.pi
        t.header.stamp = rospy.Time.now()
        t.transform.translation.x = 10 * math.sin(x)
        t.transform.translation.y = 10 * math.cos(x)
        t.transform.translation.z = 0.0
        # Identity rotation: the carrot frame keeps the turtle's orientation.
        t.transform.rotation.x = 0.0
        t.transform.rotation.y = 0.0
        t.transform.rotation.z = 0.0
        t.transform.rotation.w = 1.0
        br.sendTransform(t)
        rate.sleep()
|
[
"shuyuanhao@cetiti.com"
] |
shuyuanhao@cetiti.com
|
2be0c0e3c831afe398c67d50454cbc3f21f0f197
|
68c4805ad01edd612fa714b1e0d210115e28bb7d
|
/CoreSource/speechRec.py
|
56cbe4229ae9f989177c560c5608cfb22f72d205
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Happy-Egg/redesigned-happiness
|
ac17a11aecc7459f4ebf0afd7d43de16fb37ae2c
|
08b705e3569f3daf31e44254ebd11dd8b4e6fbb3
|
refs/heads/master
| 2022-12-28T02:40:21.713456
| 2020-03-03T09:04:30
| 2020-03-03T09:04:30
| 204,904,444
| 2
| 1
|
Apache-2.0
| 2022-12-08T06:19:04
| 2019-08-28T10:18:05
|
Python
|
UTF-8
|
Python
| false
| false
| 561
|
py
|
# coding=<encoding name>
from aip import AipSpeech
# Baidu AI Cloud account configuration.
# NOTE(review): credentials are hard-coded and committed — move them to
# environment variables / a secrets store and rotate these keys.
APP_ID = '17134145'
API_KEY = 'yDTW0ljcQd24ZKyaHYRTDleX'
SECRET_KEY = 'O6de7NZmhxd6KZILjZj2oHoqITdRoHyg'
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
# Read a file's raw bytes.
def get_file_content(file_path):
    """Return the entire contents of *file_path* as bytes."""
    handle = open(file_path, 'rb')
    try:
        return handle.read()
    finally:
        handle.close()
# Recognize a local audio file.
def do_tts(file_path, file_format):
    """Run Baidu ASR on *file_path* and return the first transcript, UTF-8 encoded.

    file_format: the audio format string expected by the Baidu SDK (e.g. 'wav').
    16000 is the sample rate; dev_pid 1537 selects a recognition model
    (presumably Mandarin — confirm against the Baidu docs).
    NOTE(review): despite the name, this performs speech *recognition* (ASR),
    not TTS, and raises KeyError if the service response has no 'result'.
    """
    ret = client.asr(get_file_content(file_path), file_format, 16000, {
        'dev_pid': 1537,
    })
    return ret['result'][0].encode('utf-8')
|
[
"yangyang4910709@163.com"
] |
yangyang4910709@163.com
|
b6dfbdbb85b92e08ea92fb40b6b8d4f6337b17e6
|
525c6a69bcf924f0309b69f1d3aff341b06feb8e
|
/sunyata/layer/recurrent/gru.py
|
acaaf611f7d5a7031bf259e99180d2c6b3d077f7
|
[] |
no_license
|
knighton/sunyata_2017
|
ba3af4f17184d92f6277d428a81802ac12ef50a4
|
4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8
|
refs/heads/master
| 2021-09-06T13:19:06.341771
| 2018-02-07T00:28:07
| 2018-02-07T00:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
from ... import backend as Z
from ... import init
from ..base import LinkBuilder
from .base import RecurrentLayer, RecurrentSpec
class GRULayer(RecurrentLayer):
    # Gated Recurrent Unit layer.  Kernels are stored concatenated along the
    # last axis in gate order (reset, update, new); each slice is `dim` wide.
    def __init__(self, forward, last, input_kernel, recurrent_kernel, bias):
        # Recover the output dim from the 3-gate-wide concatenated kernel.
        dim = input_kernel.shape[1] // 3
        dtype = input_kernel.dtype.name
        super().__init__(dim, dtype, forward, last)
        self.input_kernel = self.add_param(input_kernel)
        self.recurrent_kernel = self.add_param(recurrent_kernel)
        self.bias = self.add_param(bias)
        # Pre-slice the reset/update columns (first 2*dim) and the "new state"
        # columns (last dim) so step() avoids re-slicing every timestep.
        i = 2 * self.out_dim
        self.reset_update_input_kernel = self.input_kernel[:, :i]
        self.reset_update_recurrent_kernel = self.recurrent_kernel[:, :i]
        self.reset_update_bias = self.bias[:i]
        self.new_input_kernel = self.input_kernel[:, i:]
        self.new_recurrent_kernel = self.recurrent_kernel[:, i:]
        self.new_bias = self.bias[i:]
    def step(self, x, prev_state, prev_internal_state):
        # One GRU timestep.  prev_internal_state is unused (GRU keeps no cell
        # state); the second return value is None for the same reason.
        # Reset and update gates are computed in one fused matmul, then split.
        gates = Z.sigmoid(
            Z.matmul(x, self.reset_update_input_kernel) +
            Z.matmul(prev_state, self.reset_update_recurrent_kernel) +
            self.reset_update_bias)
        i = self.out_dim
        reset_gate = gates[:, :i]
        update_gate = gates[:, i:2 * i]
        # Candidate state: reset gate scales how much of prev_state leaks in.
        new_state = Z.tanh(
            Z.matmul(x, self.new_input_kernel) +
            Z.matmul(reset_gate * prev_state, self.new_recurrent_kernel) +
            self.new_bias)
        # Convex blend of old and candidate state via the update gate.
        state = update_gate * prev_state + (1 - update_gate) * new_state
        return state, None
class GRUSpec(RecurrentSpec):
    # Builder spec for GRULayer: holds hyperparameters and initializers.
    def __init__(self, dim=None, forward=True, last=False,
                 input_kernel_init='glorot_uniform',
                 recurrent_kernel_init='orthogonal', bias_init='zeros'):
        super().__init__(dim, forward, last)
        self.input_kernel_init = init.get(input_kernel_init)
        self.recurrent_kernel_init = init.get(recurrent_kernel_init)
        self.bias_init = init.get(bias_init)
    def make_layer(self, in_dim, out_dim, dtype):
        # All three gates' kernels are allocated as one (in_dim, 3*out_dim)
        # tensor; GRULayer slices them apart.
        input_kernel_shape = in_dim, 3 * out_dim
        # NOTE(review): the 'conv_kernel' label looks copy-pasted from a conv
        # layer (and the recurrent init below passes no label) — confirm the
        # name argument is intentional.
        input_kernel = self.input_kernel_init(
            input_kernel_shape, dtype, 'conv_kernel')
        recurrent_kernel_shape = out_dim, 3 * out_dim
        recurrent_kernel = self.recurrent_kernel_init(
            recurrent_kernel_shape, dtype)
        bias_shape = 3 * out_dim,
        bias = self.bias_init(bias_shape, dtype)
        return GRULayer(self.go_forward, self.ret_last, input_kernel,
                        recurrent_kernel, bias)
# Public factory: GRU(...) builds a GRUSpec via the shared LinkBuilder helper.
GRU = LinkBuilder(GRUSpec)
|
[
"iamknighton@gmail.com"
] |
iamknighton@gmail.com
|
4a0ccc2edeb0ad94a72db5238966a8e5fe4d0216
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/compute/networks_utils.py
|
20c83238fe96135876a0ed78af16ad4cb15c5574
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code that's shared between multiple networks subcommands."""
def _GetNetworkMode(network):
"""Takes a network resource and returns the "mode" of the network."""
if network.get('IPv4Range', None) is not None:
return 'legacy'
if network.get('autoCreateSubnetworks', False):
return 'auto'
else:
return 'custom'
def AddMode(items):
  """Lazily annotates each network resource with its gcloud display mode."""
  for net in items:
    net['x_gcloud_mode'] = _GetNetworkMode(net)
    yield net
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
aeccd1e46dde4148b0683a43aaf35d79b4119967
|
5932b41c973fb4f0d61ea2668a3036bd2af31903
|
/ue4docker/infrastructure/BuildConfiguration.py
|
30d53fc7710820b4646512a3355a42b508a2ec20
|
[
"MIT"
] |
permissive
|
hackertron/ue4-docker
|
d711290fba75cfdf4509762cd301dec7796191f5
|
f849ae89f75644c5f34276e8ebe76ef03528029c
|
refs/heads/master
| 2020-04-19T22:51:26.502673
| 2019-01-29T08:48:54
| 2019-01-29T08:48:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,589
|
py
|
from .PackageUtils import PackageUtils
from .WindowsUtils import WindowsUtils
import humanfriendly, os, platform, random
# Import the `semver` package even when the conflicting `node-semver` package is present
semver = PackageUtils.importFile('semver', os.path.join(PackageUtils.getPackageLocation('semver'), 'semver.py'))
# The default Unreal Engine git repository
DEFAULT_GIT_REPO = 'https://github.com/EpicGames/UnrealEngine.git'
# The base images for Linux containers, keyed by flavour ('opengl' plain,
# 'cudagl' further keyed by supported CUDA version).
LINUX_BASE_IMAGES = {
    'opengl': 'nvidia/opengl:1.0-glvnd-devel-ubuntu18.04',
    'cudagl': {
        '9.2': 'nvidia/cudagl:9.2-devel-ubuntu18.04',
        '10.0': 'nvidia/cudagl:10.0-devel-ubuntu18.04'
    }
}
# The default CUDA version to use when `--cuda` is specified without a value
DEFAULT_CUDA_VERSION = '9.2'
# The default memory limit (in decimal GB) for the container under Windows
DEFAULT_MEMORY_LIMIT = 10.0
class BuildConfiguration(object):
    """Holds all settings for a ue4-docker image build, derived from CLI args."""
    def __init__(self, args):
        '''
        Creates a new build configuration based on the supplied arguments object
        '''
        # Determine if we are building a custom version of UE4 rather than an official release
        if args.release.lower() == 'custom':
            # Both a custom repository and a custom branch/tag must be specified
            if args.repo is None or args.branch is None:
                raise RuntimeError('both a repository and branch/tag must be specified when building a custom version of the Engine')
            # Use the specified repository and branch/tag
            self.release = 'custom'
            self.repository = args.repo
            self.branch = args.branch
        else:
            # Validate the specified version string: only 4.x final releases
            # (no prerelease component) are accepted.
            try:
                ue4Version = semver.parse(args.release)
                if ue4Version['major'] != 4 or ue4Version['prerelease'] != None:
                    raise Exception()
                self.release = semver.format_version(ue4Version['major'], ue4Version['minor'], ue4Version['patch'])
            except:
                raise RuntimeError('invalid UE4 release number "{}", full semver format required (e.g. "4.19.0")'.format(args.release))
            # Use the default repository and the release tag for the specified version
            self.repository = DEFAULT_GIT_REPO
            self.branch = '{}-release'.format(self.release)
        # Store our common configuration settings.
        # `--linux` forces Linux containers even on a Windows host.
        self.containerPlatform = 'windows' if platform.system() == 'Windows' and args.linux == False else 'linux'
        self.dryRun = args.dry_run
        self.rebuild = args.rebuild
        self.pullPrerequisites = args.pull_prerequisites
        self.noEngine = args.no_engine
        self.noMinimal = args.no_minimal
        self.noFull = args.no_full
        self.suffix = args.suffix
        self.platformArgs = ['--no-cache'] if args.no_cache == True else []
        self.baseImage = None
        self.prereqsTag = None
        # If we're building Windows containers, generate our Windows-specific configuration settings
        if self.containerPlatform == 'windows':
            self._generateWindowsConfig(args)
        # If we're building Linux containers, generate our Linux-specific configuration settings
        if self.containerPlatform == 'linux':
            self._generateLinuxConfig(args)
    def _generateWindowsConfig(self, args):
        # Store the path to the directory containing our required Windows DLL files
        self.defaultDllDir = os.path.join(os.environ['SystemRoot'], 'System32')
        self.dlldir = args.dlldir if args.dlldir is not None else self.defaultDllDir
        # Determine base tag for the Windows release of the host system
        self.hostRelease = WindowsUtils.getWindowsRelease()
        self.hostBasetag = WindowsUtils.getReleaseBaseTag(self.hostRelease)
        # Store the tag for the base Windows Server Core image
        self.basetag = args.basetag if args.basetag is not None else self.hostBasetag
        self.baseImage = 'microsoft/dotnet-framework:4.7.2-sdk-windowsservercore-' + self.basetag
        self.prereqsTag = self.basetag
        # Verify that any user-specified base tag is valid
        if WindowsUtils.isValidBaseTag(self.basetag) == False:
            raise RuntimeError('unrecognised Windows Server Core base image tag "{}", supported tags are {}'.format(self.basetag, WindowsUtils.getValidBaseTags()))
        # Set the memory limit Docker flags
        if args.m is not None:
            try:
                # humanfriendly.parse_size returns bytes; convert to decimal GB.
                self.memLimit = humanfriendly.parse_size(args.m) / (1000*1000*1000)
            except:
                raise RuntimeError('invalid memory limit "{}"'.format(args.m))
        else:
            # --random-memory jitters the default by up to 2GB — presumably to
            # work around an issue tied to exact limits; TODO confirm intent.
            self.memLimit = DEFAULT_MEMORY_LIMIT if args.random_memory == False else random.uniform(DEFAULT_MEMORY_LIMIT, DEFAULT_MEMORY_LIMIT + 2.0)
        self.platformArgs.extend(['-m', '{:.2f}GB'.format(self.memLimit)])
        # Set the isolation mode Docker flags
        self.isolation = args.isolation if args.isolation is not None else 'default'
        if self.isolation != 'default':
            # NOTE(review): single-dash '-isolation=' — docker also documents
            # '--isolation'; confirm the single-dash form is intended here.
            self.platformArgs.append('-isolation=' + self.isolation)
        # Set the PDB truncation Docker flags
        self.keepDebug = args.keep_debug
        if self.keepDebug == True:
            self.platformArgs.extend(['--build-arg', 'KEEP_DEBUG=1'])
    def _generateLinuxConfig(self, args):
        # Determine if we are building CUDA-enabled container images
        self.cuda = None
        if args.cuda is not None:
            # Verify that the specified CUDA version is valid
            # (a bare `--cuda` selects DEFAULT_CUDA_VERSION)
            self.cuda = args.cuda if args.cuda != '' else DEFAULT_CUDA_VERSION
            if self.cuda not in LINUX_BASE_IMAGES['cudagl']:
                raise RuntimeError('unsupported CUDA version "{}", supported versions are: {}'.format(
                    self.cuda,
                    ', '.join([v for v in LINUX_BASE_IMAGES['cudagl']])
                ))
            # Use the appropriate base image for the specified CUDA version
            self.baseImage = LINUX_BASE_IMAGES['cudagl'][self.cuda]
            self.prereqsTag = 'cudagl{}'.format(self.cuda)
        else:
            self.baseImage = LINUX_BASE_IMAGES['opengl']
            self.prereqsTag = 'opengl'
|
[
"adam.rehn@my.jcu.edu.au"
] |
adam.rehn@my.jcu.edu.au
|
c3503b21d2104e0e4b8f3d87ebe23954c6c35551
|
d12cadea9d18ec6599b2ef650ce6af1a0d39afb1
|
/Server.py
|
5b0908b7852dfeadf2bb64c4853ad4c9596f6219
|
[] |
no_license
|
zhantong/wetwo-server
|
074facf45695f054855eaff26bd2dc3272a1aba0
|
7271acc9047da65d647a572160683b043b8163a1
|
refs/heads/master
| 2021-08-27T19:41:40.549066
| 2017-11-28T05:19:50
| 2017-11-28T05:19:50
| 103,080,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,088
|
py
|
from flask import Flask, request, render_template, redirect, jsonify, g
import flask_login
from WeTwo import WeTwo
app = Flask(__name__)
# NOTE(review): the session secret key is hard-coded and committed — load it
# from the environment instead and rotate this value.
app.secret_key = '\xecG>\xc3\xe6\xe5\xbds\xa5\xf1\xae\x81u\x19\xb0`\x88W\xc6\\\xb7\xfeL\xcc'
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
class User(flask_login.UserMixin):
    # Minimal session user: only carries `id`; all data lives in WeTwo.
    pass
def get_wetwo():
    """Return the per-app-context WeTwo backend, creating it lazily on flask.g."""
    if not hasattr(g, 'wetwo'):
        g.wetwo = WeTwo()
    return g.wetwo
@login_manager.user_loader
def user_loader(user_id):
    """Rehydrate the session user; returns None (anonymous) for unknown ids."""
    if not get_wetwo().is_user_id_exists(user_id):
        return
    user = User()
    user.id = user_id
    return user
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Browser login: GET renders the form, POST checks credentials."""
    if request.method == 'GET':
        return render_template('login.html')
    user_name = request.form['name']
    password = request.form['password']
    if user_name and password and get_wetwo().is_password_correct(user_name=user_name, password=password):
        user_id = get_wetwo().get_user_id(user_name)
        user = User()
        user.id = user_id
        flask_login.login_user(user)
        return 'Login Success'
    return 'Bad Login'
@app.route('/api/login', methods=['POST'])
def api_login():
    """JSON login: same credential check as login(), but returns a status dict."""
    user_name = request.form['name']
    password = request.form['password']
    if user_name and password and get_wetwo().is_password_correct(user_name=user_name, password=password):
        user_id = get_wetwo().get_user_id(user_name)
        user = User()
        user.id = user_id
        flask_login.login_user(user)
        return jsonify({'status': True, 'message': '登录成功'})
    return jsonify({'status': False, 'message': '登录失败'})
@app.route('/logout')
def logout():
    """End the browser session."""
    flask_login.logout_user()
    return 'Logged out'
@app.route('/api/logout')
def api_logout():
    """JSON variant of logout()."""
    flask_login.logout_user()
    return jsonify({'status': True, 'message': '注销成功'})
@app.route('/')
@flask_login.login_required
def index():
    """Home page: every article, each decorated with its comment list."""
    articles = get_wetwo().get_articles()
    for article in articles:
        article['comments'] = get_wetwo().get_comments(article['article_id'])
    return render_template('index.html', articles=articles)
@app.route('/api/getUserInfo')
@flask_login.login_required
def api_get_user_info():
    """Return id, display name and unread-comment count for the session user."""
    user_id = flask_login.current_user.id
    user_name = get_wetwo().get_user_name(user_id)
    num_unread_notifications = get_wetwo().get_num_unread_comments(user_id)
    info = {
        'id': user_id,
        'name': user_name,
        'num_unread_notifications': num_unread_notifications
    }
    return jsonify(info)
@app.route('/api/getAllArticles')
@flask_login.login_required
def api_get_all_articles():
    """Paged list of all articles with comments.

    NOTE(review): offset/limit from request.args are strings (the defaults are
    ints) — confirm get_articles accepts both representations.
    """
    offset = request.args['offset'] if 'offset' in request.args else 0
    limit = request.args['limit'] if 'limit' in request.args else 20
    articles = get_wetwo().get_articles(offset=offset, limit=limit)
    for article in articles:
        article['comments'] = get_wetwo().get_comments(article['article_id'])
    return jsonify(articles)
@app.route('/api/getArticles')
@flask_login.login_required
def api_get_articles():
    """Articles belonging to the session user, each with its comments."""
    user_id = flask_login.current_user.id
    articles = get_wetwo().get_articles(user_id)
    for article in articles:
        article['comments'] = get_wetwo().get_comments(article['article_id'])
    return jsonify(articles)
@app.route('/api/getArticle')
@flask_login.login_required
def api_get_article():
    """Single article (by articleId query param) with its comments."""
    article_id = request.args['articleId']
    article = get_wetwo().get_article(article_id)
    article['comments'] = get_wetwo().get_comments(article['article_id'])
    return jsonify(article)
@app.route('/postArticle', methods=['POST'])
@flask_login.login_required
def post_article():
    """Browser form handler: store a new article, then go back to the index."""
    article = request.form['article']
    user_id = flask_login.current_user.id
    article_id = get_wetwo().post_article(article, user_id)
    return redirect('/')
@app.route('/api/postArticle', methods=['POST'])
@flask_login.login_required
def api_post_article():
    """JSON article creation; optional 'time' lets callers backdate the post."""
    article = request.form['article']
    time = request.form['time'] if 'time' in request.form else None
    user_id = flask_login.current_user.id
    article_id = get_wetwo().post_article(article, user_id, time)
    return jsonify({'status': True, 'articleId': article_id})
@app.route('/postComment', methods=['POST'])
@flask_login.login_required
def post_comment():
    """Browser form handler: store a comment and re-render the article's comment block."""
    article_id = request.form['articleId']
    comment = request.form['comment']
    parent_comment_id = request.form['parentCommentId']
    user_id = flask_login.current_user.id
    get_wetwo().post_comment(article_id, user_id, comment, parent_comment_id)
    article = get_wetwo().get_article(article_id)
    article['comments'] = get_wetwo().get_comments(article_id)
    return render_template('comment.html', article=article)
@app.route('/api/postComment', methods=['POST'])
@flask_login.login_required
def api_post_comment():
    """JSON comment creation.

    NOTE(review): a client-supplied 'userId' form field overrides the session
    user, letting any logged-in user post as anyone — confirm this is an
    intentional admin/import path, otherwise remove it.
    """
    article_id = request.form['articleId']
    comment = request.form['comment']
    parent_comment_id = request.form['parentCommentId']
    time = request.form['time'] if 'time' in request.form else None
    user_id = flask_login.current_user.id if 'userId' not in request.form else request.form['userId']
    comment_id = get_wetwo().post_comment(article_id, user_id, comment, parent_comment_id, time)
    return jsonify({'status': True, 'commentId': comment_id})
@app.route('/api/getUnreadComments')
@flask_login.login_required
def api_get_unread_comments():
    """Unread comment notifications for the session user."""
    user_id = flask_login.current_user.id
    comments = get_wetwo().get_unread_comments(user_id)
    return jsonify(comments)
@app.route('/api/setCommentRead', methods=['POST'])
@flask_login.login_required
def api_set_comment_read():
    """Mark one comment notification as read."""
    comment_id = request.form['commentId']
    get_wetwo().set_comment_read(comment_id)
    return jsonify({'status': True})
@app.route('/protected')
@flask_login.login_required
def protected():
    # Simple smoke-test endpoint for the login machinery.
    return 'Logged in as: ' + flask_login.current_user.id
@app.teardown_appcontext
def teardown_appcontext(error):
    """Drop the per-context WeTwo backend when the app context ends."""
    if hasattr(g, 'wetwo'):
        del g.wetwo
if __name__ == '__main__':
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to the
    # network — disable debug (or bind to localhost) outside development.
    app.run(host='0.0.0.0', debug=True)
|
[
"zhantong1994@163.com"
] |
zhantong1994@163.com
|
2f6b30e1b2d944c0354a536d1c08f9eff2fa2e31
|
867846ed1df7f560ccc473413a70020155f66ad4
|
/fixMarkdownHeadings.py
|
1645916fdfb4c594d76c9f5feaf75e762005a85a
|
[] |
no_license
|
abhineet123/PTF
|
84297bf5aa95320dbc2d34f422f2dd563ff65a58
|
0c63f7f8251af0d70c329b2cef53694db76c1656
|
refs/heads/master
| 2023-08-18T18:34:40.513936
| 2023-08-09T17:28:51
| 2023-08-09T17:28:51
| 157,794,848
| 5
| 1
| null | 2021-05-16T18:48:32
| 2018-11-16T01:24:05
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
import pyperclip
from Tkinter import Tk
from anytree import Node, RenderTree
def findChildren(_headings, root_level, _start_id, _root_node, n_headings):
    """Recursively build anytree nodes for headings nested under `root_level`.

    _headings: list of (heading line text, line index) pairs, in file order.
    Returns (nodes, next_id): every descendant node created (depth-first) and
    the index of the first heading that is NOT under this root.
    """
    nodes = []
    _id = _start_id
    while _id < n_headings:
        _heading, line_id = _headings[_id]
        words = _heading.split(' ')
        # Nesting level = number of '#' characters in the marker token.
        curr_level = words[0].count('#')
        if curr_level <= root_level:
            # Sibling or ancestor of the current root — stop; caller owns it.
            break
        heading_words = []
        for word in words[1:]:
            # A previously-added '@ parent' suffix ends the heading proper.
            if word.startswith('@'):
                break
            if word and not word.isspace():
                heading_words.append(word)
        parent_text = ''
        if _root_node is not None and _root_node.parent is not None:
            parent_text = _root_node.name
            if curr_level > 2:
                # parent_text = str(_root_node)
                # Deeper levels carry the accumulated ancestor chain ("a/b/...").
                parent_text = '{}/{}'.format(parent_text, _root_node.parent_text)
        heading_text = '_'.join(heading_words)
        new_node = Node(heading_text, parent=_root_node, orig_text=_heading, parent_text=parent_text,
                        marker=words[0], line_id=line_id)
        nodes.append(new_node)
        # Recurse into this heading's subtree, then resume after it.
        child_nodes, ___id = findChildren(_headings, curr_level, _id + 1, new_node, n_headings)
        nodes += child_nodes
        _id = ___id
    return nodes, _id
def main():
    """Rewrite markdown headings found on the clipboard so every nested
    heading carries an ``@ parent`` breadcrumb, then copy the result back.

    Reads the clipboard via Tk, builds a heading tree with findChildren(),
    prints the tree, and writes the rewritten document with pyperclip.
    """
    in_txt = Tk().clipboard_get()
    lines = in_txt.split('\n')
    curr_root = Node("root_node")
    # (heading line text, line index) for every markdown heading line.
    headings = [(k, i) for i, k in enumerate(lines) if k.startswith('#')]
    n_headings = len(headings)
    nodes, _ = findChildren(headings, 0, 0, curr_root, n_headings)
    print(RenderTree(curr_root))
    for node in nodes:
        # Top-level headings keep their original text; only nested headings
        # receive the "@ parent" suffix.
        if node.is_root or node.parent.is_root:
            continue
        new_text = '{} {} @ {}'.format(node.marker, node.name, node.parent_text)
        lines[node.line_id] = new_text
        print('{}: new_text: {}'.format(node, new_text))
    out_txt = '\n'.join(lines)
    try:
        pyperclip.copy(out_txt)
        pyperclip.paste()  # round-trip sanity check
    except pyperclip.PyperclipException as e:
        print('Copying to clipboard failed: {}'.format(e))
|
[
"asingh1@ualberta.ca"
] |
asingh1@ualberta.ca
|
271a78833a1218dfa0f8b72a67a4f57a00c22f77
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/pa65DgwG5HMbtf6iY_17.py
|
75b738a5317af85ec0083358898136fca0256512
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
class player():
    """A player record with formatted accessors for age, height and weight."""

    def __init__(self, name, age, height, weight):
        self.name = name
        self.age = age
        self.height = height
        self.weight = weight

    def get_age(self):
        """Return '<name> is age <age>'."""
        return "{} is age {}".format(self.name, self.age)

    def get_height(self):
        """Return '<name> is <height>cm'."""
        return "{} is {}cm".format(self.name, self.height)

    def get_weight(self):
        """Return '<name> weighs <weight>kg'."""
        return "{} weighs {}kg".format(self.name, self.weight)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f92344a7b55176c215b569cd01d99ec9c4fd8ee9
|
4c7baee40b96e6499f96d6fe81935437264c9c88
|
/stock_scraper/Indicators/RSI.py
|
d153d32174cf5e1a805325812081c1df70ba9210
|
[
"MIT"
] |
permissive
|
webclinic017/Stock-Analysis
|
083d376484adebcad2d52113749a513aa48b09a8
|
eea8cb5bcb635f12eb15ac13306ef16e2892cd92
|
refs/heads/master
| 2022-04-13T00:20:54.287730
| 2020-03-29T21:05:22
| 2020-03-29T21:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,277
|
py
|
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import pandas as pd
class kRSI():
    # Wilder's RSI over a pandas OHLC frame.
    # NOTE(review): Python 2 only (`print` statements) and uses the long-
    # deprecated Series.append — port to py3/pd.concat before reuse.
    def __init__(self):
        #do nothing
        print "In kRSI class"
    def CalculateRSI(self, avgGain, avgLoss):
        # avgLoss is accumulated from negative price changes, hence the abs().
        # A zero average loss pins RSI at its ceiling of 100.
        if avgLoss == 0:
            return 100
        rs = avgGain / abs(avgLoss)
        rsi = 100 - ( 100 / ( 1 + rs))
        return rsi
    def Calculate(self, dataFrame):
        ##### ALGORITHM #####
        # 100
        # RSI = 100 - --------
        # 1 + RS
        # RS = Average Gain / Average Loss
        # The very first calculations for average gain and average loss are simple 14-period averages.
        # First Average Gain = Sum of Gains over the past 14 periods / 14.
        # First Average Loss = Sum of Losses over the past 14 periods / 14
        # The second, and subsequent, calculations are based on the prior averages and the current gain loss:
        # Average Gain = [(previous Average Gain) x 13 + current Gain] / 14.
        # Average Loss = [(previous Average Loss) x 13 + current Loss] / 14.
        close = dataFrame['Close']
        # Day-over-day change; the first NaN becomes 0 so index 0 is usable.
        change = close.diff()
        change = change.fillna(0)
        firstAvgGain = 0
        firstAvgLoss = 0
        rsiSeries = pd.Series()
        for i in range(14):
            # Appending first 14 dummy value to RSI series
            # (RSI is undefined before a full 14-period window).
            rsiSeries = rsiSeries.append(pd.Series({dataFrame.index[i]: 0}))
            if change[i]>0:
                firstAvgGain = firstAvgGain + change[i]
            else:
                firstAvgLoss = firstAvgLoss + change[i]
        firstAvgGain = firstAvgGain/14
        firstAvgLoss = firstAvgLoss/14
        rsiValue = self.CalculateRSI(firstAvgGain, firstAvgLoss)
        # Overwrite the 14th placeholder (positional index 13) with the first
        # real RSI value.
        rsiSeries[13] = rsiValue
        avgGain = firstAvgGain;
        avgLoss = firstAvgLoss
        for i in range(14, close.count()):
            # Wilder smoothing: 13 parts previous average, 1 part current move.
            if change[i]>0:
                avgGain = ((avgGain * 13) + change[i]) / 14
            else:
                avgLoss = ((avgLoss * 13) + change[i]) / 14
            rsiValue = self.CalculateRSI(avgGain, avgLoss)
            rsiSeries = rsiSeries.append(pd.Series({dataFrame.index[i]: rsiValue}))
        #print rsiSeries
        return rsiSeries
|
[
"singhanurag50@gmail.com"
] |
singhanurag50@gmail.com
|
2d46b4f1438042afecc40c4acf20344806223487
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R1/benchmark/startQiskit_Class245.py
|
b531b8a8a74a4a467b8ee86bca4983619acd57d1
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,321
|
py
|
# qubit number=3
# total number=46
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings; the result is reversed relative to input order."""
    length = len(s)
    xored = [str(int(s[i]) ^ int(t[i])) for i in range(length)]
    return ''.join(xored[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    # implement the oracle O_f
    # For every n-bit input whose image under f is "1", add an n-controlled
    # Toffoli flipping the target, with X gates conjugating the controls so
    # the multi-control matches that exact bit pattern.
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerating all 2**n inputs is exponential — fine for the small n used
    # in this benchmark.
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a Bernstein-Vazirani style circuit for the n-bit function f,
    with some extra generated gates interleaved (the '# number=' lines)."""
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)  # NOTE(review): computed but unused here — f("0"*n) would be the constant term
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
    prog.h(input_qubit[1]) # number=33
    prog.cz(input_qubit[2],input_qubit[1]) # number=34
    prog.h(input_qubit[1]) # number=35
    prog.h(input_qubit[1]) # number=3
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* and return {'|bits>': amplitude} for every basis state."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # Number of qubits is log2 of the state-vector length.
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named simulator backend and collect results.

    Returns a dict with the raw measurement counts, the full state vector,
    the most frequent measured bitstring (reversed — presumably to undo
    qiskit's little-endian bit order; confirm against callers) as 'a',
    and the caller-supplied constant 'b'.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str) -> str:
    """011 . x + 1"""
    a = "011"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str) -> str:
    """000 . x + 0"""
    a = "000"
    b = "0"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str) -> str:
    """111 . x + 1"""
    a = "111"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_Class245.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
0c5ab6e315afa21be876ef1a62aeaa6b3aaaff97
|
379a473d6f572b7fb0c00ffa3387931a6bebb082
|
/Chapter9/plot_confusion_matrix.py
|
061034801b4a3a58edac77979b4adc9847936748
|
[] |
no_license
|
Willianan/Data_Analysis_and_Mining
|
6746f75dcade79f9134574d5962ec5bc19da51de
|
8c526e5d12a535fde1b8b5c84b21007289b8eb20
|
refs/heads/master
| 2020-04-28T15:26:58.227850
| 2019-04-07T13:51:41
| 2019-04-07T13:51:41
| 175,367,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: square numpy array of raw confusion counts.
        classes: sequence of class labels for the axis ticks.
        normalize: if True, each row is divided by its sum.
        title: figure title.
        cmap: matplotlib colormap for the heatmap.
    """
    # Fix: normalize BEFORE drawing, so the heatmap colors, the colorbar
    # and the per-cell text all show the same (row-normalized) values.
    # The original normalized after plt.imshow(), so the colors reflected
    # raw counts while the text showed ratios.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Label each cell; use white text on dark cells for contrast.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
|
[
"noreply@github.com"
] |
Willianan.noreply@github.com
|
4decd0022d8b9f5903a2348dd6c8ca81a7787360
|
d25c89a54ad980c68bc8d247eb43f88499617dda
|
/src/ocr_line_curation_chars.py
|
1dda0fa5a2a17ad15a2000d2ca7bd195293a99de
|
[] |
no_license
|
nakamura196/amami
|
5922e396b89850d18660e465d8e6af498b28b967
|
19bdc712bb24ab325806c3cb2bb4666d16182768
|
refs/heads/master
| 2023-01-07T05:29:19.145424
| 2020-11-06T04:31:16
| 2020-11-06T04:31:16
| 286,902,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,639
|
py
|
from PIL import Image
import sys
sys.path.append('/path/to/dir')
import json
import pyocr
import pyocr.builders
# Pick the first available OCR backend (tesseract in practice).
tools = pyocr.get_available_tools()
if len(tools) == 0:
    print("No OCR tool found")
    sys.exit(1)
tool = tools[0]
print("Will use tool '%s'" % (tool.get_name()))
langs = tool.get_available_languages()
print("Available languages: %s" % ", ".join(langs))
# NOTE(review): 'builder' is reassigned three times — only the last
# (LineBoxBuilder) takes effect; the first two lines are dead.
builder = pyocr.builders.TextBuilder()
builder = pyocr.tesseract.CharBoxBuilder()
builder = pyocr.builders.LineBoxBuilder() # tesseract_layout=6
with open('../docs/iiif/amami/manifest.json') as f:
    df = json.load(f)
canvases = df["sequences"][0]["canvases"]
members = []
# OCR each canvas image and turn every recognized line box into a
# IIIF Curation member with an sc:painting annotation.
for i in range(len(canvases)):
    page = str(i+1).zfill(4)
    canvas = canvases[i]
    canvas_id = canvas["@id"]
    image_url = canvas["images"][0]["resource"]["@id"]
    filename = image_url.split("/")[-1]
    im_path = "../docs/files/large/"+filename
    char_boxes = tool.image_to_string(
        Image.open(im_path),
        lang='jpn_vert',
        builder=builder
    )
    # print(char_boxes)
    im = Image.open(im_path)
    for j in range(len(char_boxes)):
        box = char_boxes[j]
        # box.position appears to be ((min-x, min-y), (max-x, max-y)) with
        # the origin at the bottom-left.
        # Convert it here to x, y, width, height with the origin top-left.
        x = box.position[0][0]
        y = im.height - box.position[1][1]
        width = box.position[1][0] - x
        height = im.height - box.position[0][1] - y
        text = box.content.replace(" ", "")
        print("\t".join([
            text, # recognized text
            str(x), str(y), str(width), str(height),
            # confidence: str(box.confidence),
        ]))
        if text == "":
            continue
        member_id = canvas_id+"#xywh="+str(x)+","+str(y)+","+str(width)+","+str(height)
        member = {
            "label": "Page "+page+"_"+str(j+1),
            "metadata": [
                {
                    "label": "Annotation",
                    "value": [
                        {
                            "on": member_id,
                            "resource": {
                                "@type": "cnt:ContentAsText",
                                "chars": text,
                                "marker": {
                                    "text": text,
                                },
                                "format": "text/html"
                            },
                            "motivation": "sc:painting",
                            "@id": "http://codh.rois.ac.jp/char-shape/book/200003803/annotation/"+"Page "+page+"_"+str(j+1),
                            "@type": "oa:Annotation"
                        }
                    ]
                }
            ],
            "@id": member_id,
            "@type": "sc:Canvas"
        }
        members.append(member)
    # NOTE(review): stops after the first canvas — presumably a test run.
    break
# Wrap the collected members in a IIIF Curation document and write it out.
curation = {
    "@context": [
        "http://iiif.io/api/presentation/2/context.json",
        "http://codh.rois.ac.jp/iiif/curation/1/context.json"
    ],
    "@type": "cr:Curation",
    "@id": "https://mp.ex.nii.ac.jp/api/curation/json/aaa5d585-3cd2-4651-ba98-71769b028e19",
    "label": "Curating list",
    "selections": [
        {
            "@id": "https://mp.ex.nii.ac.jp/api/curation/json/aaa5d585-3cd2-4651-ba98-71769b028e19/range1",
            "@type": "sc:Range",
            "label": "Manual curation by IIIF Curation Viewer",
            "members": members,
            "within": {
                "@id": "https://raw.githubusercontent.com/nakamura196/amami/master/docs/iiif/amami/manifest.json",
                "@type": "sc:Manifest",
                "label": "奄美大島"
            }
        }
    ]
}
# NOTE(review): fw is never closed — a 'with open(...)' block would be safer.
fw = open("../docs/curation/test_line.json", 'w')
json.dump(curation, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
9013611ed7ed83fb4065f9d6e3bb601c6efacc71
|
9f84d91a8ae3df53b07fe3267992fba00a99ac9e
|
/torch_geometric/graphgym/contrib/__init__.py
|
47365d98aadd0b304b67af8266d8e4228eb52f85
|
[
"MIT"
] |
permissive
|
pyg-team/pytorch_geometric
|
ebea601eae228f3905465b5c2349d3fb3bb5cb26
|
a52af694b8ce6a80811e20966fe6d08a3e7511fe
|
refs/heads/master
| 2023-08-31T04:13:40.943308
| 2023-08-30T12:48:42
| 2023-08-30T12:48:42
| 106,024,057
| 6,775
| 1,563
|
MIT
| 2023-09-14T17:10:18
| 2017-10-06T16:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
from .act import * # noqa
from .config import * # noqa
from .encoder import * # noqa
from .head import * # noqa
from .layer import * # noqa
from .loader import * # noqa
from .loss import * # noqa
from .network import * # noqa
from .optimizer import * # noqa
from .pooling import * # noqa
from .stage import * # noqa
from .train import * # noqa
from .transform import * # noqa
|
[
"noreply@github.com"
] |
pyg-team.noreply@github.com
|
0ce6670c9e67f8a7231b9c6d03cec2f066c58ab0
|
376c8f2c9051b8dffe851fab7c831f96dcf06ddb
|
/dp/1965_상자넣기.py
|
e430c04d04043fdf653ac4dee4d15404c6d63625
|
[] |
no_license
|
getChan/algorithm
|
cad3ac74ac686ec4306ad8db551700d35e27a782
|
6a82c04cdbf670e3140b1a8685480a3f37c82c62
|
refs/heads/master
| 2021-06-30T01:08:42.270514
| 2020-09-19T07:55:45
| 2020-09-19T07:55:45
| 140,247,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
n = int(input())
boxes = [int(_) for _ in input().split()]
dp = [1 for _ in range(n)]
# Longest increasing subsequence (LIS) problem
# dp[i]: length of the longest strictly increasing subsequence ending at i
answer = 1
for i in range(0, n):
    for j in range(0, i):
        if boxes[i] > boxes[j] and dp[i] < dp[j]+1:
            dp[i] = dp[j] + 1
    if dp[i] > answer:
        answer = dp[i]
# answer is the maximum number of nested boxes (O(n^2) DP).
print(answer)
|
[
"9511chn@gmail.com"
] |
9511chn@gmail.com
|
1a0fbf08f1e836f3287ff05cb95026d3db0e9c4d
|
071ca9494ce811cdf52dc585ec863dc621a7865b
|
/test_coroutines.py
|
c82bc510a64e9c940b63bebf76764d13498b1922
|
[] |
no_license
|
phaustin/parallel_project
|
37d7ea7dbc6de8406d50e47142b271e05c1552eb
|
821cca5ad7dcf5c3b6caa2ca3f20358b86120d06
|
refs/heads/master
| 2021-08-22T08:18:01.675473
| 2017-11-29T17:56:57
| 2017-11-29T17:56:57
| 112,510,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
#http://blog.thumbtack.net/python-coroutines/
def coroutine(f):
    """Decorator that builds a generator from *f* and primes it.

    The returned factory calls *f*, advances the resulting generator to
    its first ``yield`` (so callers may ``send`` immediately), and
    returns the ready-to-use generator.
    """
    def primed(*args, **kwargs):
        gen = f(*args, **kwargs)
        gen.send(None)
        return gen
    return primed
@coroutine
def logger(prefix="", next=None):
    """Coroutine: print each received message with *prefix*, forward to *next*."""
    # NOTE: 'next' shadows the builtin; kept for interface compatibility.
    while True:
        message = yield
        print("{0}: {1}".format(prefix, message))
        if next:
            next.send(message)
@coroutine
def cache_checker(cache, onsuccess=None, onfail=None):
    """Coroutine: on a cache hit send the cached value to *onsuccess*,
    otherwise forward the raw request to *onfail*."""
    while True:
        request = yield
        if request in cache and onsuccess:
            onsuccess.send(cache[request])
        elif onfail:
            onfail.send(request)
@coroutine
def load_balancer(*workers):
    """Coroutine: hand incoming requests to *workers* round-robin."""
    while True:
        for worker in workers:
            request = yield
            worker.send(request)
@coroutine
def worker(cache, response, next=None):
    """Coroutine: record *response* in *cache* for each request, forward it."""
    while True:
        request = yield
        cache[request] = response
        if next:
            next.send(response)
# Wire the pipeline: requests flow through a request logger, then a cache
# check; misses are load-balanced over three logged workers whose
# responses go to the response logger (and fill the shared cache).
cache = {}
response_logger = logger("Response")
cluster = load_balancer(
    logger("Worker 1", worker(cache, 1, response_logger)),
    logger("Worker 2", worker(cache, 2, response_logger)),
    logger("Worker 3", worker(cache, 3, response_logger)),
)
cluster = cache_checker(cache, response_logger, cluster)
cluster = logger("Request", cluster)
if __name__ == "__main__":
    # Demo: push 20 random requests through the pipeline.
    from random import randint
    for i in range(20):
        cluster.send(randint(1, 5))
|
[
"paustin@eos.ubc.ca"
] |
paustin@eos.ubc.ca
|
957b52ebdc5a273f9ccdd5b16a60f4b0053ff1bd
|
2a4be1e256ed19c8dd5d37cb3cfbe7f50bb4f8f6
|
/Landing/wsgi.py
|
7a21379d42a306591ffa506ec52344f303075fa1
|
[] |
no_license
|
Miker69/Landing_telebot
|
55c3b34aac3db5753203260421ae7f2584160122
|
f32538d563c74108053418b177340b634f26c5f3
|
refs/heads/master
| 2023-04-04T03:48:58.941446
| 2021-04-13T15:37:36
| 2021-04-13T15:37:36
| 357,578,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for Landing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Landing.settings')
application = get_wsgi_application()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1a7fafcd552464eccc415a7cd7f6c13f568bd217
|
c2c03e034513a766c7de8298be428fb3eab3ab7b
|
/chainerrl/NeverSay20/env/bin/wheel
|
6aec0ddb9ff1fc7d022b5e8ab8645344758aa131
|
[] |
no_license
|
hamko/sample
|
434adeca12e11587edce8cad799162b84c7f5071
|
9b0624b99e3e551d6b72b632d3a7d1a38aac7a9f
|
refs/heads/master
| 2021-01-17T02:51:25.174354
| 2018-10-23T02:40:04
| 2018-10-23T02:40:04
| 9,640,383
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
#!/home/hamko/git/sample/chainerrl/TicTacToe/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the 'wheel' package: strip any
# "-script.pyw"/".exe" suffix from argv[0], then delegate to wheel's CLI.
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"wakataberyo@gmail.com"
] |
wakataberyo@gmail.com
|
|
f8022236e53f1c2f42d0dfc05b69d592acb001a4
|
0d0c13d80924b6e5cfc74a623eb250a5fd2e2cca
|
/Stacks/sliding window maximum.py
|
b848b90e9f5e374590087d07587cd492462bd090
|
[
"Apache-2.0"
] |
permissive
|
Akashdeep-Patra/problemSolving
|
54e2fc3c3a9587b8c976921f6fc45364af1dfcac
|
c278e5d090af7370e56789e68b7bb73dc37165f8
|
refs/heads/master
| 2022-11-15T19:20:54.585886
| 2020-06-29T10:47:39
| 2020-06-29T10:47:39
| 258,956,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
from collections import deque
class Solution:
    # @param A : tuple of integers
    # @param B : integer
    # @return a list of integers
    def slidingMaximum(self, a, k):
        """Return the maximum of every length-k window of `a`.

        Fixes: the original unconditionally indexed the first k elements,
        so k > len(a) raised IndexError instead of returning [max(a)] as
        the problem statement requires, and an empty input also crashed.
        Runs in O(n) using a monotonic deque of indices.
        """
        if not a:
            return []
        # Problem statement: a window larger than the array yields a single
        # element, the global maximum — clamp k instead of crashing.
        k = min(k, len(a))
        q = deque()  # indices; their values are kept non-increasing
        ans = []
        for i, x in enumerate(a):
            # Smaller tail values can never be a window maximum again.
            while q and a[q[-1]] < x:
                q.pop()
            q.append(i)
            # Evict the front index once it slides out of the window.
            if q[0] <= i - k:
                q.popleft()
            # Record a result once the first full window is formed.
            if i >= k - 1:
                ans.append(a[q[0]])
        return ans
"""
Sliding Window Maximum
Problem Description
Given an array of integers A. There is a sliding window of size B which is moving from the very left of the array to the very right. You can only see the B numbers in the window. Each time the sliding window moves rightwards by one position. You have to find the maximum for each window.
Return an array C, where C[i] is the maximum value in the array from A[i] to A[i+B-1].
Refer to the given example for clarity.
NOTE: If B > length of the array, return 1 element with the max of the array.
Problem Constraints
1 <= |A|, B <= 106
Input Format
The first argument given is the integer array A.
The second argument given is the integer B.
Output Format
Return an array C, where C[i] is the maximum value of from A[i] to A[i+B-1].
Example Input
Input 1:
A = [1, 3, -1, -3, 5, 3, 6, 7]
B = 3
Input 2:
A = [1, 2, 3, 4, 2, 7, 1, 3, 6]
B = 6
Example Output
Output 1:
[3, 3, 5, 5, 6, 7]
Output 2:
[7, 7, 7, 7]
Example Explanation
Explanation 1:
Window position | Max
--------------------|-------
[1 3 -1] -3 5 3 6 7 | 3
1 [3 -1 -3] 5 3 6 7 | 3
1 3 [-1 -3 5] 3 6 7 | 5
1 3 -1 [-3 5 3] 6 7 | 5
1 3 -1 -3 [5 3 6] 7 | 6
1 3 -1 -3 5 [3 6 7] | 7
Explanation 2:
Window position | Max
--------------------|-------
[1 2 3 4 2 7] 1 3 6 | 7
1 [2 3 4 2 7 1] 3 6 | 7
1 2 [3 4 2 7 1 3] 6 | 7
1 2 3 [4 2 7 1 3 6] | 7
"""
|
[
"replituser@example.com"
] |
replituser@example.com
|
779a04c19db092d2dee3ac7a2cee5ec9378b58bc
|
da47e42519b6d5eb37bdb634fd618672706e79da
|
/localizacion_metromed/Txt_file_module/models/txt_activo.py
|
d825bda0ddc8ed2165a4344dd28d866ff52ec13b
|
[] |
no_license
|
Tysamncaweb/produccion2
|
02bbbccefc4f4cd0d0948b1b0552d931f804fb9b
|
b95909d0689fc787185290565f0873040a6027cf
|
refs/heads/master
| 2022-04-26T13:51:22.316294
| 2020-04-29T19:58:35
| 2020-04-29T19:58:35
| 260,013,639
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,580
|
py
|
# -*- coding: utf-8 -*-
# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from odoo import models, api, fields
from logging import getLogger
_logger = getLogger(__name__)
class bono(models.TransientModel):
    """Wizard extension that exports an Activo-bank ('0171') bulk payroll
    payment text file for the payslips in the selected date range."""
    _inherit = "account.wizard.generacion.txtfile"
    @api.multi
    def print_bono2(self):
        """Write archivo.txt with one 'H' header record and one 'P' detail
        record per employee whose account starts with '0171', then hand the
        file to imprimir_txt for download."""
        VAR = 0        # detail-record counter
        VAR2 = 0       # header: number of matching payslips
        concepto2 = self.concepto.upper()
        totalpago = 0
        if self.bancose == 'activo':
            # Create the .txt file in the local Odoo working directory.
            file = open("archivo.txt", "w")
            # Collect the payslips inside the selected date range.
            self.invoices = self.env['hr.payslip'].search(
                [('date_to', '<=', self.date_to), ('date_from', '>=', self.date_from)])
            _logger.info("\n\n\n {} \n\n\n".format(self.invoices))
            # Split the payment date (YYYY-MM-DD) into its components.
            date_f = str(self.date_imp)
            a = date_f[0:4]
            m = date_f[5:7]
            d = date_f[8:]
            # First pass: accumulate the header totals.
            for invoice in self.invoices:
                # fetch the employee's account number
                cuenta = invoice.employee_id.account_number_2
                if cuenta:
                    filtro = cuenta[0:4]
                else:
                    filtro = '1234'
                if filtro == '0171':
                    VAR2 += 1
                    for n in invoice.line_ids:
                        #varsuma = n.total
                        #varsuma = float("{0:.2f}".format(varsuma))..
                        totalpago += n.total
                    # NOTE(review): totalpago becomes a str below, so a second
                    # matching payslip would make 'totalpago += n.total' raise
                    # TypeError — confirm a single Activo batch is expected.
                    totalpago = float("{0:.2f}".format(totalpago))
                    totalpago = str(totalpago)
                    # Decide whether a trailing zero is needed to force two
                    # decimal digits. NOTE(review): ceroextra is never used.
                    for i in range(0, len(totalpago)):
                        if (totalpago[i] == '.'):
                            cds = totalpago[i + 1:]
                            if len(cds) == 2:
                                ceroextra = '0'
                                imprimir0 = ''
                            else:
                                ceroextra = ''
                                imprimir0 = '0'
                    # write to the txt (decimal comma format)
                    totalpago = totalpago.replace(".", ",")
            # Header record: H;count;total;concept;batch;ddmmyyyy
            lineas = ['H',
                      ';',
                      VAR2,
                      ';',
                      totalpago,
                      imprimir0,
                      ';',
                      concepto2,
                      ';',
                      self.nlote,
                      ';',
                      d,m,a]
            for l in lineas:
                file.write(str(l))
            file.write('\n')
            # Second pass: one 'P' detail record per matching employee.
            for invoice in self.invoices:
                # fetch the employee's account number
                cuenta = invoice.employee_id.account_number_2
                if cuenta:
                    filtro = cuenta[0:4]
                else:
                    filtro = '1234'
                if filtro == '0171':
                    letra = invoice.employee_id.nationality
                    ncedu = invoice.employee_id.identification_id_2
                    catcedu = len(ncedu)
                    if catcedu == 7:
                        catce = '00'
                    if catcedu == 8:
                        catce = '0'
                    # catce: zero padding prepended to the ID number
                    # ncedu: the ID (cedula) number
                    # compute the net payroll amount for this payslip
                    busqueda = self.env['hr.salary.rule.category'].search([('id', '!=', 0)])
                    if busqueda:
                        for a in busqueda:
                            if a.name == 'Net':
                                ttotal = a.id
                        busqueda2 = self.env['hr.payslip.line'].search([('id', '!=', 0)])
                        for vip in invoice.line_ids:
                            for vip2 in busqueda2:
                                if vip == vip2:
                                    if vip2.category_id.id == ttotal:
                                        totalpago = vip2.total
                                        totalpago = float("{0:.2f}".format(totalpago))
                                        totalpago = str(totalpago)
                                        for i in range(0, len(totalpago)):
                                            if (totalpago[i] == '.'):
                                                cds = totalpago[i + 1:]
                                                if len(cds) == 2:
                                                    ceroextra = '0'
                                                    imprimir0 = ''
                                                else:
                                                    ceroextra = ''
                                                    imprimir0 = '0'
                                        totalpago = totalpago.replace(".", ",")
                                        VAR += 1
                                        # print to the txt
                                        lineas = ['P',
                                                  ';',
                                                  letra,
                                                  catce,
                                                  ncedu,
                                                  ';',
                                                  totalpago,
                                                  imprimir0,
                                                  ';',
                                                  concepto2,
                                                  ';',
                                                  VAR,
                                                  ';',
                                                  '000'
                                                  ]
                                        for l in lineas:
                                            file.write(str(l))
                                        file.write('\n')
            file.close()
            nombretxt = 'CargaMasivadepagodeNómina.txt'
            nameclass = 'account.wizard.generacion.txtfile'
            return self.imprimir_txt(nombretxt,nameclass)
|
[
"soporte.innova2129@gmail.com"
] |
soporte.innova2129@gmail.com
|
d4156f6b387f2c2bbfab43d6331fe0e83479c75c
|
d2332604fc80b6d622a263b2af644425a7e703de
|
/facebook/trees_and_graphs/12_accounts_merge.py
|
9d2fd775ac9e45fdb77ea0793a06731010f2ed00
|
[] |
no_license
|
abhijitdey/coding-practice
|
b3b83a237c1930266768ce38500d6812fc31c529
|
6ae2a565042bf1d6633cd98ed774e4a77f492cc8
|
refs/heads/main
| 2023-08-14T23:31:06.090613
| 2021-10-18T21:35:56
| 2021-10-18T21:35:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
from typing import List
"""
1. First, create a graph (adjacency list) connecting all emails that are related to the same account
2. Second, keep track of the account name for each unique email
3. Find connected components in the graph:
a. Each connected component refers to the same account.
b. So, all unique emails in a connected component belong to the same account. Hence, sorting them gives the answer
- Use DFS to traverse the graph and find the connected components
"""
from collections import defaultdict
class Solution:
    def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
        """Merge accounts that share any email.

        Builds an undirected graph over emails (each email linked to the
        first email of its account entry), then walks the connected
        components with an iterative DFS. Each component becomes one
        merged account: the holder's name followed by the sorted emails.
        """
        if not accounts:
            return []
        owner_of = {}                 # email -> account holder name
        neighbors = defaultdict(set)  # undirected email adjacency
        for entry in accounts:
            holder = entry[0]
            emails = entry[1:]
            for email in emails:
                anchor = emails[0]
                # Link every email to the entry's first email (star shape).
                neighbors[anchor].add(email)
                neighbors[email].add(anchor)
                owner_of[email] = holder
        seen = set()

        def collect(start):
            # Iterative DFS gathering every email reachable from `start`.
            seen.add(start)
            pending = [start]
            component = [start]
            while pending:
                current = pending.pop()
                for nxt in neighbors[current]:
                    if nxt in seen:
                        continue
                    seen.add(nxt)
                    pending.append(nxt)
                    component.append(nxt)
            return component

        merged = []
        for email in neighbors:
            if email in seen:
                continue
            component = collect(email)
            merged.append([owner_of[email]] + sorted(component))
        return merged
|
[
"ashiz2013@gmail.com"
] |
ashiz2013@gmail.com
|
8556153eee128df8d4a4b2c68b116c9fc5edad6e
|
67416177cd9e221db0b20332c02dcc7680fcdd0e
|
/이것이 취업을 위한 코딩 테스트다/Chapter05_DFS_BFS/Q04_S.py
|
0e89d038a59dde7bce9a39cb6bf567d78488ff5a
|
[] |
no_license
|
svclaw2000/Algorithm
|
4fe5e3bf50888b974df4f3d87387a003b5249352
|
b6d92cf0d18997e9e973d5f731ecb44a7935d93a
|
refs/heads/main
| 2023-06-21T21:50:13.089719
| 2021-07-11T14:18:47
| 2021-07-11T14:18:47
| 363,825,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
from collections import deque
N, M = map(int, input().split())
maze = [list(map(int, input())) for _ in range(N)]
# Four movement directions: up, right, down, left.
ds = ((-1, 0), (0, 1), (1, 0), (0, -1))
def dfs(x, y):
    # NOTE(review): despite the name, this is a breadth-first search
    # (queue + popleft), which is what makes the distances shortest paths.
    queue = deque()
    queue.append((x, y))
    while queue:
        x, y = queue.popleft()
        for dx, dy in ds:
            nx, ny = x + dx, y + dy
            if not 0 <= nx < N or not 0 <= ny < M or maze[nx][ny] == 0: # skip out-of-bounds cells and monsters
                continue
            if maze[nx][ny] == 1: # first visit: store shortest distance, then explore onward
                maze[nx][ny] = maze[x][y] + 1
                queue.append((nx, ny))
    return maze[-1][-1]
print(dfs(0, 0))
|
[
"svclaw2000@gmail.com"
] |
svclaw2000@gmail.com
|
4ce810b7a98c0b77591ee5e277abc2db6860b0c2
|
556db265723b0cc30ad2917442ed6dad92fd9044
|
/tensorflow/python/profiler/profiler_v2_test.py
|
42fbeba1e98be36f2c2c401a3db60bfbe1f1b5a8
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
graphcore/tensorflow
|
c1669b489be0e045b3ec856b311b3139858de196
|
085b20a4b6287eff8c0b792425d52422ab8cbab3
|
refs/heads/r2.6/sdk-release-3.2
| 2023-07-06T06:23:53.857743
| 2023-03-14T13:04:04
| 2023-03-14T13:48:43
| 162,717,602
| 84
| 17
|
Apache-2.0
| 2023-03-25T01:13:37
| 2018-12-21T13:30:38
|
C++
|
UTF-8
|
Python
| false
| false
| 4,412
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler import trace
class ProfilerTest(test_util.TensorFlowTestCase):
  """Integration tests for the profiler_v2 start/stop/options API."""
  def test_profile_exceptions(self):
    """Double start and stop-without-start must raise; a bad logdir resets state."""
    logdir = self.get_temp_dir()
    profiler.start(logdir)
    with self.assertRaises(errors.AlreadyExistsError):
      profiler.start(logdir)
    profiler.stop()
    with self.assertRaises(errors.UnavailableError):
      profiler.stop()
    # Test with a bad logdir, and it correctly raises exception and deletes
    # profiler.
    # pylint: disable=anomalous-backslash-in-string
    profiler.start('/\/\/:123')
    # pylint: enable=anomalous-backslash-in-string
    with self.assertRaises(Exception):
      profiler.stop()
    profiler.start(logdir)
    profiler.stop()
  def test_save_profile(self):
    """A traced run must leave the expected per-host artifact files on disk."""
    logdir = self.get_temp_dir()
    profiler.start(logdir)
    with trace.Trace('three_times_five'):
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
    self.assertAllEqual(15, product)
    profiler.stop()
    file_list = gfile.ListDirectory(logdir)
    self.assertEqual(len(file_list), 2)
    for file_name in gfile.ListDirectory(logdir):
      if gfile.IsDirectory(os.path.join(logdir, file_name)):
        self.assertEqual(file_name, 'plugins')
      else:
        self.assertTrue(file_name.endswith('.profile-empty'))
    # Every tool proto plus the trace file is written under
    # plugins/profile/<run>/ and prefixed with the hostname.
    profile_dir = os.path.join(logdir, 'plugins', 'profile')
    run = gfile.ListDirectory(profile_dir)[0]
    hostname = socket.gethostname()
    overview_page = os.path.join(profile_dir, run,
                                 hostname + '.overview_page.pb')
    self.assertTrue(gfile.Exists(overview_page))
    input_pipeline = os.path.join(profile_dir, run,
                                  hostname + '.input_pipeline.pb')
    self.assertTrue(gfile.Exists(input_pipeline))
    tensorflow_stats = os.path.join(profile_dir, run,
                                    hostname + '.tensorflow_stats.pb')
    self.assertTrue(gfile.Exists(tensorflow_stats))
    kernel_stats = os.path.join(profile_dir, run, hostname + '.kernel_stats.pb')
    self.assertTrue(gfile.Exists(kernel_stats))
    trace_file = os.path.join(profile_dir, run, hostname + '.trace.json.gz')
    self.assertTrue(gfile.Exists(trace_file))
  def test_profile_with_options(self):
    """start() with explicit ProfilerOptions still produces output files."""
    logdir = self.get_temp_dir()
    options = profiler.ProfilerOptions(
        host_tracer_level=3, python_tracer_level=1)
    profiler.start(logdir, options)
    with trace.Trace('three_times_five'):
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
    self.assertAllEqual(15, product)
    profiler.stop()
    file_list = gfile.ListDirectory(logdir)
    self.assertEqual(len(file_list), 2)
  def test_context_manager_with_options(self):
    """The Profile context manager mirrors explicit start()/stop()."""
    logdir = self.get_temp_dir()
    options = profiler.ProfilerOptions(
        host_tracer_level=3, python_tracer_level=1)
    with profiler.Profile(logdir, options):
      with trace.Trace('three_times_five'):
        three = constant_op.constant(3)
        five = constant_op.constant(5)
        product = three * five
      self.assertAllEqual(15, product)
    file_list = gfile.ListDirectory(logdir)
    self.assertEqual(len(file_list), 2)
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
2a812d45d9da8870b85821736661cd9b51ad3c61
|
c7cebec6209866b02ee654cffeafe0f2cf0646f1
|
/implementation/oceangame.py
|
8c2fcb4e93ffdc7cf0b2c1af8292a4326c4b7a2d
|
[] |
no_license
|
dondon17/algorithm
|
5492cf039a96ecf5a944816bdca9b5755e5a2623
|
da4d6ca1c21c31c6521a62b38855e0b9cf4b0d91
|
refs/heads/master
| 2023-05-02T14:54:35.185914
| 2021-05-30T07:31:40
| 2021-05-30T07:31:40
| 323,802,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
# Simulation: a character on an n x m grid (0 = land, 1 = sea) repeatedly
# turns left and moves onto unvisited land; when all four directions fail,
# it steps back once, and stops when it cannot. Prints the visited count.
n, m = map(int, input().split())
x, y, _dir = map(int, input().split())
check = [[0]*m for _ in range(n)] # n x m matrix of zeros: the visit map
check[x][y] = 1 # mark the start position as visited
_map = []
for i in range(n):
    _map.append(list(map(int, input().split())))
count = 1
turn_time = 0 # counts consecutive turns into already-visited or sea cells
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
def turnleft():
    # Rotate the heading 90 degrees counter-clockwise (0..3 wraps around).
    global _dir
    _dir -= 1
    if _dir == -1:
        _dir = 3
while True:
    turnleft()
    nx = x + dx[_dir]
    ny = y + dy[_dir]
    if check[nx][ny] == 0 and _map[nx][ny] == 0:
        # Unvisited land ahead: advance and reset the turn counter.
        check[nx][ny] = 1
        x, y = nx, ny
        count += 1
        turn_time = 0
        continue
    else:
        turn_time+=1
    if turn_time == 4:
        # All four directions exhausted: step backwards if it is land,
        # otherwise the walk is over.
        nx = x-dx[_dir]
        ny = y-dy[_dir]
        if _map[nx][ny] == 0:
            x, y = nx, ny
        else: break
        turn_time = 0
print(count)
|
[
"qwerqw889@ajou.ac.kr"
] |
qwerqw889@ajou.ac.kr
|
f45b44a0ce075baa3867c855c1d857223d4631c4
|
ef4a1748a5bfb5d02f29390d6a66f4a01643401c
|
/algorithm/algorithm_week/week3/problem_3.py
|
1dfac10225edd5d46d7a5f1b0aee2c62ba86901b
|
[] |
no_license
|
websvey1/TIL
|
aa86c1b31d3efc177df45503d705b3e58b800f8e
|
189e797ba44e2fd22a033d1024633f9e0128d5cf
|
refs/heads/master
| 2023-01-12T10:23:45.677578
| 2019-12-09T07:26:59
| 2019-12-09T07:26:59
| 162,102,142
| 0
| 1
| null | 2022-12-11T16:31:08
| 2018-12-17T08:57:58
|
Python
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
import sys
# Redirect stdin so input() reads the bundled test-case file.
sys.stdin = open("problem_3.txt", "r")
T = int(input())
for tc in range(1, T+1):
    comp = input()
    total = input()
    len_comp = len(comp)
    len_total = len(total)
    # empty_list[i]: occurrences of comp[i] inside total.
    empty_list = [0] * len_comp
    result = 0
    # print(empty_list)
    for i in range(len_comp):
        for j in range(len_total):
            if total[j] == comp[i]:
                empty_list[i] += 1
    # Answer: the highest per-character occurrence count.
    result = max(empty_list)
    print(f'#{tc} {result}')
# TODO: try using a dict (e.g. collections.Counter) instead of nested loops
|
[
"websvey1@gmail.com"
] |
websvey1@gmail.com
|
d4263f84ee75cfae0c1c0448bd6e638c64abaaec
|
f68e0b205bd3eb036905c60bd03a8d9c7f3b1d88
|
/gluon-tutorials-zh-master/chapter_optimization/adagrad-gluon.py
|
9fe3fd6be483681cf7e1719444f2b19373cdcb0f
|
[
"Apache-2.0"
] |
permissive
|
SleepyBag/TrivialPractice
|
c31458d0c28afba158cb4090cb7013267ff54bb2
|
8e006fbe1425f62b52b2a5fe5b6404ea1883f3ab
|
refs/heads/master
| 2020-03-22T00:34:37.415074
| 2018-06-30T14:02:04
| 2018-06-30T14:02:04
| 139,253,389
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,188
|
py
|
import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon import nn
import sys
sys.path.append('..')
import utils
# Generate the dataset: y = w.x + b plus small Gaussian noise.
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = nd.random.normal(scale=1, shape=(num_examples, num_inputs))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += nd.random.normal(scale=0.01, shape=labels.shape)
# Linear regression model.
net = nn.Sequential()
net.add(nn.Dense(1))
learning_rate = .01
# Baseline run: plain SGD.
net.collect_params().initialize(mx.init.Normal(sigma=1), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': learning_rate})
utils.optimize(batch_size=10, trainer=trainer, num_epochs=5, decay_epoch=None,
               log_interval=10, features=features, labels=labels, net=net)
# Comparison run: Adagrad with the same learning rate, fresh parameters.
net.collect_params().initialize(mx.init.Normal(sigma=1), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'adagrad',
                        {'learning_rate': learning_rate})
utils.optimize(batch_size=10, trainer=trainer, num_epochs=5, decay_epoch=None,
               log_interval=10, features=features, labels=labels, net=net)
|
[
"xueqianming200@gmail.com"
] |
xueqianming200@gmail.com
|
86fee19decbe6fbd99256621b5d77459a4c80b51
|
a6cc157fdd1a15e9d451af653cf3eadbdac60885
|
/cpp_develop/catkin_ws/src/ros_arduino_bridge/ros_arduino_python/src/ros_arduino_python/calibrate_linear.py
|
3ab1f901ef9a959a737978f37d4c3bb5cebabeba
|
[] |
no_license
|
miaoruonan/morn
|
9b4f0b64241c12140e8adc571579974d9e35a14b
|
88e353ce480265b0b0b12f22a67ce13dd2ff42f3
|
refs/heads/master
| 2021-06-26T05:18:58.321932
| 2021-01-28T07:21:51
| 2021-01-28T07:25:16
| 214,172,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,792
|
py
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Point
from math import copysign, sqrt, pow
import tf
class CalibrateLinear():
    """ROS node that drives the robot a fixed straight-line distance using
    odometry feedback, to calibrate the odometry linear scale factor."""
    def __init__(self):
        #give the node a name
        rospy.init_node('calibrate_linear', anonymous=False)
        #set rospy to execute a shutdown function when terminating the script
        rospy.on_shutdown(self.shutdown)
        #How fast will we check the odometry values?
        self.rate = 10
        r = rospy.Rate(self.rate)
        #set the distance to travel
        self.test_distance = 1.5
        self.speed = 0.2
        self.tolerance = 0.01
        self.odom_linear_scale_correction = 1.0
        self.start_test = True
        #Publisher to control the robot's speed
        self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
        #The base frame is base_footprint for the robot
        self.base_frame = rospy.get_param('~base_frame', '/base_footprint')
        #The odom frame is usually just /odom
        self.odom_frame = rospy.get_param('~odom_frame', '/odom')
        #initialize the tf listener
        self.tf_listener = tf.TransformListener()
        #give tf some time to fill its buffer
        rospy.sleep(2)
        #make sure we see the odom and base frames
        self.tf_listener.waitForTransform(self.odom_frame, self.base_frame, rospy.Time(), rospy.Duration(60.0))
        self.position = Point()
        #get the starting position from the tf transform between the odom and base frames
        self.position = self.get_position()
        x_start = self.position.x
        y_start = self.position.y
        move_cmd = Twist()
        # Control loop: publish velocity commands until ROS shuts down.
        while not rospy.is_shutdown():
            #Stop the robot by default
            move_cmd = Twist()
            if self.start_test:
                #get the current position from the tf transform between the odom and base frames
                self.position = self.get_position()
                #compute the euclidean distance from the target point
                distance = sqrt(pow((self.position.x - x_start), 2) +
                                pow((self.position.y - y_start), 2))
                #correct the estimate distance by the correction factor
                distance *= self.odom_linear_scale_correction
                #How close are we?
                error = distance - self.test_distance
                #are we close enough?
                if not self.start_test or abs(error) < self.tolerance:
                    self.start_test = False
                    params = False
                    rospy.loginfo(params)
                else:
                    #if not, move in the appropriate direction
                    # copysign makes the speed negative once the target is
                    # overshot (error > 0), so the robot backs up.
                    move_cmd.linear.x = copysign(self.speed, -1*error)
            else:
                # Test finished: keep re-latching the start point so a
                # re-enabled test measures from the current pose.
                self.position = self.get_position()
                x_start = self.position.x
                y_start = self.position.y
            self.cmd_vel.publish(move_cmd)
            r.sleep()
        #stop the robot
        self.cmd_vel.publish(Twist())
    def get_position(self):
        """Return the current base position (Point) in the odom frame,
        or None if the transform lookup fails."""
        #get the current transform between the odom and base frames
        try:
            (trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
        except (tf.Exception, tf.ConnectivityException, tf.LookupException):
            rospy.loginfo("TF exception")
            return
        return Point(*trans)
    def shutdown(self):
        """Stop the robot when the node is shut down."""
        #Always stop the robot when shutting down the node
        rospy.loginfo("Stopping the robot")
        self.cmd_vel.publish(Twist())
        rospy.sleep(1)
if __name__ == '__main__':
    try:
        CalibrateLinear()
        rospy.spin()
    # NOTE(review): bare except also swallows KeyboardInterrupt /
    # rospy.ROSInterruptException — presumably intentional for a clean
    # Ctrl-C exit, but worth confirming.
    except:
        rospy.loginfo("Calibration terminated.")
|
[
"15879209474@163.com"
] |
15879209474@163.com
|
7ffae845f088fb2c95c7c37d9f6e0559af611adc
|
6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41
|
/lib/phonenumbers/data/region_AC.py
|
243d707a8213496a114091c66baa4551350d4629
|
[] |
no_license
|
JamesBrace/InfluenceUWebLaunch
|
549d0b48ff3259b139cb891a19cb8b5382ffe2c8
|
332d25940e4b1b45a7a2a8200f77c8413543b199
|
refs/heads/master
| 2021-09-04T04:08:47.594900
| 2018-01-15T16:49:29
| 2018-01-15T16:49:29
| 80,778,825
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
"""Auto-generated file, do not edit by hand. AC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_AC = PhoneMetadata(id='AC', country_code=247, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[46]\\d{4}|[01589]\\d{5}', possible_number_pattern='\\d{5,6}', possible_length=(5, 6)),
fixed_line=PhoneNumberDesc(national_number_pattern='6[2-467]\\d{3}', possible_number_pattern='\\d{5}', example_number='62889', possible_length=(5,)),
mobile=PhoneNumberDesc(national_number_pattern='4\\d{4}', possible_number_pattern='\\d{5}', example_number='40123', possible_length=(5,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(national_number_pattern='[01589]\\d{5}', possible_number_pattern='\\d{6}', example_number='542011', possible_length=(6,)),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc())
|
[
"james.brace@mail.mcgill.ca"
] |
james.brace@mail.mcgill.ca
|
9f628a3fcb3ba15724f1abcb004ff2ed34f398a2
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/nonNegativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-nonNegativeInteger-enumeration-3-2.py
|
b576ac7679de66f6d806e4fa821974abf9447594
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 648
|
py
|
from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3 import NistschemaSvIvAtomicNonNegativeIntegerEnumeration3
from output.models.nist_data.atomic.non_negative_integer.schema_instance.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_atomic_non_negative_integer_enumeration_3 import NistschemaSvIvAtomicNonNegativeIntegerEnumeration3Type

# Generated conformance-test fixture: an enumeration-restricted
# nonNegativeInteger instance pinned to the enum member VALUE_9176.
obj = NistschemaSvIvAtomicNonNegativeIntegerEnumeration3(
    value=NistschemaSvIvAtomicNonNegativeIntegerEnumeration3Type.VALUE_9176
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
52164c85a80608ac931cec19edf6440abf757d17
|
e48a43af1b285f19137cf1c839ea5836312c6793
|
/toutiao.py
|
f330e711018b71ad4b9619c8c133b9cad3456107
|
[] |
no_license
|
willfengis/toutiao
|
5f933aadebd6a24d5f4a69c162a61efb4dcd68d0
|
10fc31a46368f9cc08e5889f4a644a2e0ffc7028
|
refs/heads/master
| 2021-01-21T10:47:17.788795
| 2017-08-31T10:09:27
| 2017-08-31T10:09:27
| 101,987,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
import requests
from urllib.parse import urlencode
from requests.exceptions import RequestException
import json
from bs4 import BeautifulSoup
import re
import pymongo
import os
from multiprocessing import Pool
from hashlib import md5
from com.it.ttconfig import *
client = pymongo.MongoClient(MONGO_URL,connect=False)
db = client[MONGO_DB]
def getHtml(Url):
    """Fetch the search-listing page at *Url* and return its text.

    Returns the sentinel string "getHtmlerror" on any non-200 status
    or request failure (callers treat that string as an error marker).
    """
    try:
        response = requests.get(Url, headers=Myheader)
    except RequestException:
        return "getHtmlerror"
    if response.status_code != 200:
        return "getHtmlerror"
    return response.text
def getStr(Myhtml):
    """Yield the ``share_url`` of every entry under the JSON ``data`` key.

    *Myhtml* is the raw JSON text of a search-results response. Yields
    nothing when the payload is falsy or carries no ``data`` key; entries
    without a ``share_url`` yield ``None``.
    """
    payload = json.loads(Myhtml)
    if not payload or "data" not in payload:
        return
    for entry in payload["data"]:
        yield entry.get("share_url")
def getDetail(Url1):
    """Fetch an article detail page and return its HTML text.

    Returns "getDetailHtmlerror" for a non-200 status; on a request
    failure it prints a short diagnostic first, then returns the same
    sentinel.
    """
    try:
        response = requests.get(Url1, headers=Myheader)
    except RequestException:
        print("Url1 error")
        return "getDetailHtmlerror"
    return response.text if response.status_code == 200 else "getDetailHtmlerror"
def getStr2(Myhtml2, Url1):
    """Parse a gallery detail page, download each sub-image, and return
    a record dict {"title", "imageurl", "Url"}.

    Returns the sentinel "url2error" when the embedded gallery JSON is
    missing or has no "sub_images" entry.
    """
    soup = BeautifulSoup(Myhtml2, "lxml")
    page_title = soup.select("title")[0].get_text()
    # The gallery JSON is embedded in an inline <script>; extract it by regex.
    pattern = re.compile("BASE_DATA.galleryInfo.*?gallery:(.*?),\n\s*?siblingList", re.S)
    match = re.search(pattern, Myhtml2)
    if not match:
        return "url2error"
    gallery = json.loads(match.group(1))
    if not gallery or "sub_images" not in gallery:
        return "url2error"
    image_urls = [item.get("url") for item in gallery["sub_images"]]
    for one_url in image_urls:
        downLoad(one_url)
    return {"title": page_title, "imageurl": image_urls, "Url": Url1}
def saveMongo(imageurl):
    """Insert one scraped record into MongoDB; True on success."""
    inserted = db[MONGO_TABLE].insert(imageurl)
    if not inserted:
        return False
    print("url save to mongodb ok")
    return True
def downLoad(imageurlone):
    """Fetch one image URL and hand the bytes to saveImage.

    Always returns the sentinel "getHtmlerror" (callers ignore the
    return value); a request failure only prints a diagnostic.
    """
    try:
        response = requests.get(imageurlone, headers=Myheader)
        if response.status_code == 200:
            saveImage(response.content)
    except RequestException:
        print("downloadImagError")
    return "getHtmlerror"
def saveImage(content):
    """Write image bytes to ./image/<md5(content)>.jpg, content-addressed.

    The md5 digest of the bytes is used as the filename, so identical
    images are stored exactly once; an existing file is left untouched.
    """
    image_dir = os.getcwd() + "/image"
    # Fix: the original crashed with FileNotFoundError when ./image did
    # not exist; create it on first use.
    os.makedirs(image_dir, exist_ok=True)
    path = "{0}/{1}.{2}".format(image_dir, md5(content).hexdigest(), "jpg")
    if not os.path.exists(path):
        # The with-statement closes the file; the explicit f.close()
        # inside the with-block in the original was redundant.
        with open(path, "wb") as f:
            f.write(content)
        print("downimage successful:" + path)
def main(Offset):
    """Scrape one search-results page (pagination offset *Offset*) end to end:
    list -> detail -> images -> MongoDB."""
    params = {'offset': Offset, 'format': 'json', 'keyword': Find, 'autoload': 'true', 'count': '20', 'cur_tab': '1'}
    search_url = "http://www.toutiao.com/search_content/?" + urlencode(params)
    listing = getHtml(search_url)
    for detail_url in getStr(listing):
        detail_html = getDetail(detail_url)
        record = getStr2(detail_html, detail_url)
        if record != "url2error":
            saveMongo(record)
if __name__ == "__main__":
page = [i*20 for i in range(0,6)]
pool = Pool()
pool.map(main,page)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
651465018b3370f246d12c0b45dead9a006898ac
|
5488617b1b05c436b1f8c8642ea75ca754719f8d
|
/phenomenological/Single_TOP/select_scripts/script/script_1065.py
|
07afd7a1b201fed85dd725acf70a9ea4ba7a34d7
|
[] |
no_license
|
wenxingfang/TW_Top
|
fdb1ba136be6ace8fdacaade58cb4ca4fcdc3c9e
|
389e76c904d08a59d9141b9b66ec15d2583f8e9a
|
refs/heads/master
| 2021-02-05T06:54:27.908688
| 2020-02-28T13:24:00
| 2020-02-28T13:24:00
| 243,754,087
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
# Python 2 batch script: runs the compiled ROOT selection macro for job 1065.
import os
import ROOT
# Load the pre-compiled selection library (ACLiC .so) into the ROOT session.
ROOT.gSystem.Load("/user/wenxing/ST_TW_channel/CMSSW_8_0_25/src/Phynomenological_study/Single_TOP/select_scripts/select_save_parton_C.so")
# Invoke the macro entry point with this job's index.
ROOT.gROOT.ProcessLine('select_save_parton("1065")')
print 'Done!'
|
[
"wenxing.fang@cern.ch"
] |
wenxing.fang@cern.ch
|
781541a1f0b86a43ce728e170b53055a99749d93
|
12123592a54c4f292ed6a8df4bcc0df33e082206
|
/py2/pgms/sec6/flask/db_create.py
|
667c9a87acb3e16e1bbb26111a89d32eaae62ba8
|
[] |
no_license
|
alvinooo/advpython
|
b44b7322915f832c8dce72fe63ae6ac7c99ef3d4
|
df95e06fd7ba11b0d2329f4b113863a9c866fbae
|
refs/heads/master
| 2021-01-23T01:17:22.487514
| 2017-05-30T17:51:47
| 2017-05-30T17:51:47
| 92,860,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
#!venv/bin/python
# db_create.py - create database
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
# Create all tables declared on the SQLAlchemy models.
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    # First run: create the migration repository and put the DB under version control.
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
    # Repository already exists: register the DB at the repo's current version.
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
|
[
"alvin.heng@teradata.com"
] |
alvin.heng@teradata.com
|
c384ec15b71a910edb23a3ece597828f1918efc1
|
ba80ca143ba35fd481730786a27ebdb1f88ce835
|
/algorithm/Daily Coding Problem/전체탐색/5.py
|
b00f4a6021504dc364e8ea5b32dbdadd7ce6fbe1
|
[] |
no_license
|
uiandwe/TIL
|
c541020b65adc53578aeb1c3ba4c6770b3b2e8b3
|
186544469374dd0279099c6c6aa7555ee23e42fe
|
refs/heads/master
| 2022-02-15T08:33:07.270573
| 2022-01-01T15:22:54
| 2022-01-01T15:22:54
| 63,420,931
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
# -*- coding: utf-8 -*-
"""
Given an array of integers where every integer occurs three times except for one integer,
which only occurs once, find and return the non-duplicated integer.
For example, given [6, 1, 3, 3, 3, 6, 6], return 1. Given [13, 19, 13, 13], return 19.
Do this in O(N) time and O(1) space.
"""
def getSingle(arr):
    """Return the element of *arr* that appears exactly once, where every
    other element appears exactly three times.

    Classic ones/twos bit trick: ``seen_once`` holds bits seen 1 (mod 3)
    times, ``seen_twice`` bits seen 2 (mod 3) times; bits reaching a third
    occurrence are cleared from both. O(n) time, O(1) space.
    """
    seen_once = 0
    seen_twice = 0
    for value in arr:
        seen_twice = seen_twice | (seen_once & value)
        seen_once = seen_once ^ value
        # Clear any bit that has now been seen three times.
        third_time_mask = ~(seen_once & seen_twice)
        seen_once &= third_time_mask
        seen_twice &= third_time_mask
    return seen_once
if __name__ == '__main__':
    # Demo runs: both samples have 1 as the non-triplicated element.
    for sample in ([13, 1, 13, 13], [13, 1, 13, 13, 2, 3, 2, 3, 2, 3]):
        print(getSingle(sample))
|
[
"noreply@github.com"
] |
uiandwe.noreply@github.com
|
bcc9c809b5e3b2f36a892d4ae81d9509a2aba905
|
4cbc8b81d197bc392d1b57856254300331b9738f
|
/python/voz.py
|
9a63b10843050891efa76b73fcb08d8c72ccf6b6
|
[
"MIT"
] |
permissive
|
vcatafesta/chili
|
87b9606f17cda645ba44cbf2bb4cc4637e18d211
|
5c734ac88454db76eb2f4e92c13364a5bbc7a93a
|
refs/heads/main
| 2023-09-01T01:39:09.457448
| 2023-08-29T21:23:28
| 2023-08-29T21:23:28
| 171,972,556
| 2
| 2
| null | 2019-02-22T01:38:49
| 2019-02-22T01:26:46
| null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# coding: cp860
# Continuously listen on the microphone and echo Google's pt-BR transcription.
import speech_recognition as sr

recognizer = sr.Recognizer()
with sr.Microphone() as source:
    # Calibrate the energy threshold against current background noise.
    recognizer.adjust_for_ambient_noise(source)
    while True:
        audio = recognizer.listen(source)
        print("Voce respondeu:", recognizer.recognize_google(audio, language='pt'))
|
[
"vcatafesta@gmail.com"
] |
vcatafesta@gmail.com
|
00fc5380aacd4b854d68cc5cd1802c1879b9e2e4
|
260306e56beaaa5ecad8f783d094ecbabef4705b
|
/blog.py
|
3bfbbfd1ea4129429f2520d6cd12680037202c45
|
[] |
no_license
|
xxnbyy/mytools
|
dd5f09033b2b794b3e56bf16b9d4f28fe1377503
|
88d99614f09dd7a96f787236d0bbf674dfc5fcf2
|
refs/heads/master
| 2021-04-29T10:13:07.230358
| 2016-12-29T13:16:56
| 2016-12-29T13:16:56
| 77,874,884
| 0
| 1
| null | 2017-01-03T01:49:32
| 2017-01-03T01:49:32
| null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
#############################################################
###
### _|_|_| _| _| _| _|
### _| _| _| _|_|_| _|_| _| _| _|_|_|_|
### _|_| _|_| _| _| _| _| _| _| _|
### _| _| _| _| _| _| _| _| _| _|
### _|_|_| _| _| _|_|_| _| _| _| _|_|
### _|
### _|
###
### name: blog.py
### function: write blog
### date: 2016-11-02
### author: quanyechavshuo
### blog: https://3xp10it.cc
#############################################################
import time
from exp10it import figlet2file
# Render the "3xp10it" banner via figlet2file — presumably (0, True) select
# output target/printing behavior; confirm against the exp10it API.
figlet2file("3xp10it",0,True)
# Brief pause so the banner stays visible before the script continues.
time.sleep(1)
|
[
"quanyechavshuo@gmail.com"
] |
quanyechavshuo@gmail.com
|
7428a6f9ed8f18d1d5b40f66c207c09dbccfea2e
|
58ee1dc37b57e0b4f06cf383c6a9e0654f490150
|
/python-zict/lilac.py
|
74172c63bc984bdbddbadf099461698472befd29
|
[] |
no_license
|
MikeyBaldinger/arch4edu
|
f3af87ef3a8d4cd78fde7e0ef75658c17dbe8c06
|
c1775bf7fe0ffc87f3c8b4109fb1e8acde12a430
|
refs/heads/master
| 2022-12-23T16:40:55.513537
| 2020-09-28T21:00:59
| 2020-09-28T21:00:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
#!/usr/bin/env python3
# lilac packaging script for python-zict: declarative config consumed by lilaclib.
from lilaclib import *

maintainers = [{'github': 'petronny', 'email': 'Jingbei Li <i@jingbei.li>'}]
# Rebuild when the AUR package or the 'python' alias updates.
update_on = [{'aur': None}, {'alias': 'python'}]
build_prefix = 'extra-x86_64'
repo_depends = ['python-heapdict']
# Standard AUR build hooks provided by lilaclib.
pre_build = aur_pre_build
post_build = aur_post_build
if __name__ == '__main__':
    single_main(build_prefix)
|
[
"i@jingbei.li"
] |
i@jingbei.li
|
5cf4e3cccdec114e403ab352d32e8640f5a6250b
|
b99bbc50ab1d039948ccf853963ae044a97498fb
|
/src/api/symbols/views/__init__.py
|
9f0254365c9be66a905516f608870a81172d33c2
|
[] |
no_license
|
fan1018wen/Alpha
|
26899cc0eb6761bf6bd8089e7d12716c9e7ae01e
|
c50def8cde58fd4663032b860eb058302cbac6da
|
refs/heads/master
| 2021-05-12T12:54:15.747220
| 2017-10-11T10:58:51
| 2017-10-11T10:58:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,736
|
py
|
from django.core.paginator import Paginator
from django.db.models import Q
from rest_framework.views import APIView
from common.models import BackstageHTTPResponse
from common.utils import log_exception
from symbols.filters import SymbolFilter
from symbols.models import Symbol
from symbols.serializers import SymbolSerializer
class SymbolListAPI(APIView):
    """Paginated list of data points (symbols) available to formulas."""
    @log_exception
    def get(self, request, *args, **kwargs):
        """
        List of data points available to formulas
        ---
        parameters:
            - name: index
              description: page number
              type: integer
              paramType: query
              required: false
            - name: number
              description: items per page
              type: integer
              paramType: query
              required: false
            - name: table_name
              description: table name
              type: string
              paramType: query
              required: false
            - name: classification_1
              description: first classification dimension
              type: string
              paramType: query
              required: false
            - name: classification_2
              description: second classification dimension
              type: string
              paramType: query
              required: false
        """
        symbols = Symbol.objects.all()
        # Apply the query-string filters (table_name, classification_*).
        symbols = SymbolFilter(request.GET, queryset=symbols).qs
        # Default page size 100, default page 1.
        paginator = Paginator(symbols, request.GET.get('number', 100))
        page = paginator.page(request.GET.get('index', 1))
        serializer = SymbolSerializer(page, many=True)
        return BackstageHTTPResponse(
            code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,
            data=serializer.data,
            pageinfo=page
        ).to_response()
class TableListAPI(APIView):
    """Distinct, non-empty table names usable by formulas."""
    @log_exception
    def get(self, request, *args, **kwargs):
        """
        Tables available to formulas
        ---
        parameters:
            - name: index
              description: page number
              type: integer
              paramType: query
              required: false
            - name: number
              description: items per page
              type: integer
              paramType: query
              required: false
        """
        # Exclude NULL and empty table names, return the sorted distinct set.
        table_names = list(Symbol.objects.exclude(
            Q(table_name__isnull=True)|(Q(table_name=''))
        ).values_list('table_name', flat=True).order_by('table_name').distinct())
        return BackstageHTTPResponse(
            code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,
            data=table_names,
        ).to_response()
class SymbolClassificationListAPI(APIView):
    """Distinct values of one classification dimension (classification_<n>)."""
    @log_exception
    def get(self, request, classification, *args, **kwargs):
        """
        Classification dimensions available to formulas
        ---
        parameters:
            - name: classification
              description: index of the classification dimension
              type: integer
              paramType: path
              required: true
        """
        column_name = 'classification_%s' % classification
        # 404 when the requested classification_<n> column does not exist on the model.
        if column_name not in [i.attname for i in Symbol._meta.fields]:
            return BackstageHTTPResponse(
                code=BackstageHTTPResponse.API_HTTP_CODE_NOT_FOUND,
                message='未找到数据'
            ).to_response()
        # Exclude NULL and empty values, return the sorted distinct set.
        query_dict_1 = {'%s__isnull' % column_name: True}
        query_dict_2 = {column_name: ''}
        column_values = list(Symbol.objects.exclude(
            Q(**query_dict_1)|(Q(**query_dict_2))
        ).values_list(column_name, flat=True).order_by(column_name).distinct())
        return BackstageHTTPResponse(
            code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,
            data=column_values,
        ).to_response()
|
[
"mengqiang@1b2b.cn"
] |
mengqiang@1b2b.cn
|
9ae6c6e071cb9074c94b0058901696c52d416298
|
a0eb6744e6f7f509b96d21f0bc8b3f8387f6861c
|
/notebook/union_find_basic_usage.py
|
01c45b5560292afba9712dec8f754ebe83733393
|
[
"MIT"
] |
permissive
|
nkmk/python-snippets
|
a6c66bdf999502e52f4795a3074ced63bf440817
|
f9dd286a9cf93f474e20371f8fffc4732cb3c4d5
|
refs/heads/master
| 2023-08-03T04:20:05.606293
| 2023-07-26T13:21:11
| 2023-07-26T13:21:11
| 98,900,570
| 253
| 77
|
MIT
| 2020-10-25T01:12:53
| 2017-07-31T14:54:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
from union_find_basic import UnionFindBasic, UnionFindPathCompression, UnionFindByRank, UnionFindBySize, UnionFind
# Demo 1: basic union-find — naive parent chains, no optimization.
ufb = UnionFindBasic(5)
print(ufb.parents)
# [0, 1, 2, 3, 4]
ufb.union(3, 4)
print(ufb.parents)
ufb.union(2, 3)
print(ufb.parents)
ufb.union(1, 2)
print(ufb.parents)
ufb.union(0, 4)
print(ufb.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 2, 2, 3]
# [0, 1, 1, 2, 3]
# [0, 0, 1, 2, 3]
print([ufb.find(i) for i in range(5)])
# [0, 0, 0, 0, 0]
# Demo 2: union-find with path compression — find() flattens chains.
ufpc = UnionFindPathCompression(5)
print(ufpc.parents)
# [0, 1, 2, 3, 4]
ufpc.union(3, 4)
print(ufpc.parents)
ufpc.union(2, 3)
print(ufpc.parents)
ufpc.union(1, 2)
print(ufpc.parents)
ufpc.union(0, 4)
print(ufpc.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 2, 2, 3]
# [0, 1, 1, 2, 3]
# [0, 0, 1, 1, 1]
print([ufpc.find(i) for i in range(5)])
# [0, 0, 0, 0, 0]
# Demo 3: union by rank — the shallower tree is attached under the deeper root.
ufbr = UnionFindByRank(5)
print(ufbr.parents)
# [0, 1, 2, 3, 4]
ufbr.union(3, 4)
print(ufbr.parents)
ufbr.union(2, 3)
print(ufbr.parents)
ufbr.union(1, 2)
print(ufbr.parents)
ufbr.union(0, 4)
print(ufbr.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 3, 3, 3]
# [0, 3, 3, 3, 3]
# [3, 3, 3, 3, 3]
# Demo 4: union by size — the smaller set joins the larger; sizes tracked per root.
ufbs = UnionFindBySize(5)
print(ufbs.parents)
# [0, 1, 2, 3, 4]
ufbs.union(3, 4)
print(ufbs.parents)
ufbs.union(2, 3)
print(ufbs.parents)
ufbs.union(1, 2)
print(ufbs.parents)
ufbs.union(0, 4)
print(ufbs.parents)
# [0, 1, 2, 3, 3]
# [0, 1, 3, 3, 3]
# [0, 3, 3, 3, 3]
# [3, 3, 3, 3, 3]
print(ufbs.size)
# [1, 1, 1, 5, 1]
print(ufbs.size[ufbs.find(0)])
# 5
# Demo 5: combined implementation — negative parent entries encode root + set size.
uf = UnionFind(5)
print(uf.parents)
# [-1, -1, -1, -1, -1]
uf.union(3, 4)
print(uf.parents)
uf.union(2, 3)
print(uf.parents)
uf.union(1, 2)
print(uf.parents)
uf.union(0, 4)
print(uf.parents)
# [-1, -1, -1, -2, 3]
# [-1, -1, 3, -3, 3]
# [-1, 3, 3, -4, 3]
# [3, 3, 3, -5, 3]
|
[
"nkmk.on@gmail.com"
] |
nkmk.on@gmail.com
|
5be778cd62c0fc4fb164b11572b2864f06dd6ffe
|
4a0f8c5c0e8324fa614da776f2a704b5c369ccbb
|
/topologyTest/GetDDIs_150_250Examples_WithDifferentDomainNames.py
|
cafde521581bfddd4fd57d7b907fcb2ae3e1149d
|
[] |
no_license
|
magic2du/contact_matrix
|
9f8ae868d71e7e5c8088bf22a9407ea3eb073be6
|
957e2ead76fabc0299e36c1435162edd574f4fd5
|
refs/heads/master
| 2021-01-18T21:15:07.341341
| 2015-09-16T02:14:53
| 2015-09-16T02:14:53
| 24,237,641
| 0
| 0
| null | 2015-09-10T19:58:24
| 2014-09-19T16:48:37
| null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
# Python 2 script: select domain-domain interactions (DDIs) with >=2 distinct
# interface topologies and 150-250 examples, writing the result list to a file.
import _mysql
from dealFile import *
#Get of Domains which has more than 2 interfaces have 16-20 examples
db=_mysql.connect(host="localhost",user="root",passwd="zxcv4321",db="DDI")
#db.query("""select COUNT(*) from PPI inner join example on (ID = PPI_ID) where domain1="ACT" and domain2="ACT" and topology_1 = 6 and topology_2 = 6""")
#db.query("""select * from PPI inner join example on (ID = PPI_ID) where domain1="ACT" and domain2="ACT" """)
ddiList=readDDIsFile('listOfFolders15OCT.txt')
ddis=[]
#Number of Domains which has 2 interfaces have more than 15 examples
for ddi in ddiList:
    [domain1,domain2]=ddi
    # Skip homotypic pairs; only heterotypic DDIs are of interest here.
    if domain1 == domain2:
        continue
    #print i
    #print domain1
    #print domain2
    #query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
    #query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
    # Count distinct interface topologies for this domain pair.
    query='SELECT COUNT(DISTINCT topology_1,topology_2) from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
    #print query
    #query='select domain1,domain2 from DDI1'
    db.query(query)
    result=db.store_result()
    numTopology=result.fetch_row(0)
    print numTopology[0][0]
    # NOTE(review): fetch_row likely returns strings; under Python 2 a
    # str < int comparison is always False, so this guard may never fire —
    # verify whether int(numTopology[0][0]) < 2 (and continue?) was intended.
    if numTopology[0][0]<2:
        break
    try:
        # Count total examples for the pair; keep it if within (150, 250).
        query='SELECT COUNT(*) from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
        #print query
        db.query(query)
        result=db.store_result()
        numExample=result.fetch_row(0)
        print int(numExample[0][0])
        if int(numExample[0][0])>150 and int(numExample[0][0])<250:
            ddis.append(domain1+'_int_'+domain2)
    except:
        print 'error'
        break
writeListFile('listOfDDIsHaveOver2InterfacesHave150-250Examples.txt',ddis)
#print result.fetch_row()
#print r[0][0] readDDIsFile('listOfDDIsHave2InterfacesOver15.txt')
|
[
"magic2du@gmail.com"
] |
magic2du@gmail.com
|
b5ab5cda1555793b46c2e5542858767a98e8ef6e
|
658e2e3cb8a4d5343a125f7deed19c9ebf06fa68
|
/course_DE/udacity-data-engineering-projects-master/Project 5 - Data Pipelines with Airflow/exercises/dags/3_ex3_subdags/dag.py
|
bc6617c67ca71e8cfaa562b89339fc7fdf1fc524
|
[] |
no_license
|
yennanliu/analysis
|
3f0018809cdc2403f4fbfe4b245df1ad73fa08a5
|
643ad3fed41961cddd006fadceb0e927f1db1f23
|
refs/heads/master
| 2021-01-23T21:48:58.572269
| 2020-10-13T22:47:12
| 2020-10-13T22:47:12
| 57,648,676
| 11
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,352
|
py
|
# Instructions
# In this exercise, we’ll place our S3 to RedShift Copy operations into a SubDag.
# 1 - Consolidate HasRowsOperator into the SubDag
# 2 - Reorder the tasks to take advantage of the SubDag Operators
import datetime
from airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.operators.udacity_plugin import HasRowsOperator
from lesson3.exercise3.subdag import get_s3_to_redshift_dag
import sql_statements
start_date = datetime.datetime.utcnow()
dag = DAG(
    "lesson3.exercise3",
    start_date=start_date,
)
# SubDag: create the trips table and copy its CSV from S3 into Redshift.
trips_task_id = "trips_subdag"
trips_subdag_task = SubDagOperator(
    subdag=get_s3_to_redshift_dag(
        "lesson3.exercise3",
        trips_task_id,
        "redshift",
        "aws_credentials",
        "trips",
        sql_statements.CREATE_TRIPS_TABLE_SQL,
        s3_bucket="udac-data-pipelines",
        s3_key="divvy/unpartitioned/divvy_trips_2018.csv",
        start_date=start_date,
    ),
    task_id=trips_task_id,
    dag=dag,
)
# SubDag: same create-and-copy pattern for the stations table.
stations_task_id = "stations_subdag"
stations_subdag_task = SubDagOperator(
    subdag=get_s3_to_redshift_dag(
        "lesson3.exercise3",
        stations_task_id,
        "redshift",
        "aws_credentials",
        "stations",
        sql_statements.CREATE_STATIONS_TABLE_SQL,
        s3_bucket="udac-data-pipelines",
        s3_key="divvy/unpartitioned/divvy_stations_2017.csv",
        start_date=start_date,
    ),
    task_id=stations_task_id,
    dag=dag,
)
#
# TODO: Consolidate check_trips and check_stations into a single check in the subdag
# as we did with the create and copy in the demo
#
# Data-quality gates: fail the run if either table loaded zero rows.
check_trips = HasRowsOperator(
    task_id="check_trips_data",
    dag=dag,
    redshift_conn_id="redshift",
    table="trips"
)
check_stations = HasRowsOperator(
    task_id="check_stations_data",
    dag=dag,
    redshift_conn_id="redshift",
    table="stations"
)
# Downstream aggregation that needs both tables present and non-empty.
location_traffic_task = PostgresOperator(
    task_id="calculate_location_traffic",
    dag=dag,
    postgres_conn_id="redshift",
    sql=sql_statements.LOCATION_TRAFFIC_SQL
)
#
# TODO: Reorder the Graph once you have moved the checks
#
# Graph: load -> quality check (per table) -> traffic aggregation.
trips_subdag_task >> check_trips
stations_subdag_task >> check_stations
check_stations >> location_traffic_task
check_trips >> location_traffic_task
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
a0fcd76bb531bd1b8db92bfd0f143b1ac789e17f
|
f983d2fc949bc0de944755a19e57e5d15466dd98
|
/homeads/mails.py
|
a837ea5fa7f51062eae6d086a491ffecd86079ce
|
[] |
no_license
|
wd5/localized_classified_ads
|
2c523a58372a3963d15f01e52709e1923df20ca7
|
49414088a8ba7f09da35f005b15652efd2bcdb18
|
refs/heads/master
| 2020-12-25T15:30:40.113192
| 2012-11-01T15:29:02
| 2012-11-01T15:29:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
#-*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from utils.mails import AdEmailMultiAlternatives
class HomeEmail(AdEmailMultiAlternatives):
    """
    Class used to send multi-alternatives email (text + html)
    for AcheterSansCom and LouerSansCom
    """
    def get_default_context(self):
        """Pick the color palette for the email template, keyed on the ad
        type when an ad is attached, otherwise on the current site domain.

        NOTE(review): if self.ad is neither HomeForSaleAd nor HomeForRentAd
        (or the domain matches neither site), self.default_context is never
        set and the return raises AttributeError — presumably those cases
        cannot occur; confirm.
        """
        if self.ad:
            # Sale ads: teal links, gold accent.
            if self.ad.__class__.__name__ == "HomeForSaleAd":
                self.default_context = {'linkColor': '#20B2AB',
                                        'secondColor': '#FFB82E'}
            # Rent ads: mauve links, pink accent.
            if self.ad.__class__.__name__ == "HomeForRentAd":
                self.default_context = {'linkColor': '#9D81A1',
                                        'secondColor': 'Pink'}
        else:
            # No ad attached: fall back to the site the email is sent from.
            domain = Site.objects.get_current().domain
            if domain == 'achetersanscom.com':
                self.default_context = {'linkColor': '#20B2AB',
                                        'secondColor': '#FFB82E'}
            if domain == 'louersanscom.com':
                self.default_context = {'linkColor': '#9D81A1',
                                        'secondColor': 'Pink'}
        return self.default_context
    def get_default_images(self):
        """Pick the inline images (logo + shadow) with the same ad-type /
        domain dispatch as get_default_context; same unmatched-case caveat."""
        if self.ad:
            if self.ad.__class__.__name__ == "HomeForSaleAd":
                self.default_files = (('img/home.png', 'logo'),
                                      ('img/shadow_bottom.jpg', 'shadow'))
            if self.ad.__class__.__name__ == "HomeForRentAd":
                self.default_files = (('img/apartment.png', 'logo'),
                                      ('img/shadow_bottom.jpg', 'shadow'))
        else:
            domain = Site.objects.get_current().domain
            if domain == 'achetersanscom.com':
                self.default_files = (('img/home.png', 'logo'),
                                      ('img/shadow_bottom.jpg', 'shadow'))
            if domain == 'louersanscom.com':
                self.default_files = (('img/apartment.png', 'logo'),
                                      ('img/shadow_bottom.jpg', 'shadow'))
        return self.default_files
class UserSignIn(HomeEmail):
    """
    Sign-up confirmation email.
    """
    subject = u"[{{ site.name }}] Validation de votre inscription"
    template_name = 'emails/user_sign_in/body'
class HomeAdCreatedMessageEmail(HomeEmail):
    """
    Sent when a user creates a new ad.
    """
    subject = u"[{{ site.name }}] Annonce créée"
    template_name = 'emails/home_ad_created/body'
class HomeAdUpdatedMessageEmail(HomeEmail):
    """
    Sent when a user updates an ad.
    """
    subject = u"[{{ site.name }}] Annonce mise à jour"
    template_name = 'emails/home_ad_updated/body'
class BuyerToVendorMessageEmail(HomeEmail):
    """
    User message email from buyer to vendor about an ad.
    """
    subject = u'[{{ site.name }}] Nouveau message à propos de votre bien'
    template_name = 'emails/to_vendor_message/body'
class VendorToBuyerMessageEmail(HomeEmail):
    """
    User message email from vendor to buyer about a search.
    """
    subject = u'[{{ site.name }}] Nouveau message à propos de votre recherche'
    template_name = 'emails/to_buyer_message/body'
class NewPotentialBuyerToVendorMessageEmail(HomeEmail):
    """
    Sent to the vendor when a user's search matches the vendor's ad.
    """
    subject = u'[{{ site.name }}] Une nouvelle personne pourrait être interessée par votre bien'
    template_name = 'emails/to_vendor_potential_buyer/body'
class NewAdToBuyerMessageEmail(HomeEmail):
    """
    Sent to inform a user that a new ad corresponds to their search.
    """
    subject = u'[{{ site.name }}] Un nouveau bien correspond à votre recherche'
    template_name = 'emails/to_buyer_potential_ad/body'
|
[
"samuel.goldszmidt@gmail.com"
] |
samuel.goldszmidt@gmail.com
|
2d91f87c27aff3220f48df0e44ec5d65370af653
|
c831e7f6c434900d817f59a11b25e78a1a5090ad
|
/Calibration/CalibConfigFiles/MuonCalibration/CalibConfig_DetModel89_RecoStage38.py
|
4f213b2a710dcce9269a2f654daa625b68761bfc
|
[] |
no_license
|
StevenGreen1/OptimisationStudies
|
8cca03f57d2cbf81e5fb609f13e2fa4b9c9880f6
|
c5741e8d2fab4752ceca8b10cc5f2bbc1a7fafa9
|
refs/heads/master
| 2021-01-18T21:30:51.418785
| 2017-02-21T16:27:50
| 2017-02-21T16:27:50
| 44,306,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
# Calibration constants consumed by the Pandora/ILD reconstruction chain
# (per the filename: DetModel89 / RecoStage38, muon calibration — TODO confirm).
# Digitisation Constants - ECal
CalibrECal = 42.121331495
# Digitisation Constants ILDCaloDigi - HCal
CalibrHCalBarrel = 47.5716455642
CalibrHCalEndcap = 53.3873293873
CalibrHCalOther = 29.2886957667
# Digitisation Constants NewLDCCaloDigi - HCal (-1 = unused here)
CalibrHCal = -1
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = 0.0001475
CalibrHCalMIP = 0.0004925
# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 153.846
HCalToMIPCalibration = 41.841
MuonToMIPCalibration = 10.3093
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.01529193221
HCalToEMGeVCalibration = 1.12124159762
ECalToHadGeVCalibration = 1.08839104614
HCalToHadGeVCalibration = 1.12124159762
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1
# Timing ECal
ECalBarrelTimeWindowMax = 1000000
ECalEndcapTimeWindowMax = 1000000
# Timing HCal
HCalBarrelTimeWindowMax = 1000000
HCalEndcapTimeWindowMax = 1000000
|
[
"sg1sg2sg3@hotmail.co.uk"
] |
sg1sg2sg3@hotmail.co.uk
|
836dfafcfdee968679acbc1cd37e6add131774e2
|
994a82e4d859e605cf67736446aadcaf3cca2ec8
|
/examples/query_horizon.py
|
f5d72107849b8b207524dd159b162cfb44653d09
|
[
"Apache-2.0"
] |
permissive
|
kingdavid6336/py-stellar-base
|
fe7a5af576b7f03f7d36badca6a540232719e7cc
|
5e22370113e81eca1096ae62d58a5e663ffebca7
|
refs/heads/master
| 2021-12-18T14:20:01.146139
| 2020-06-21T07:55:59
| 2020-06-21T07:58:57
| 231,694,118
| 1
| 0
|
Apache-2.0
| 2020-06-21T12:04:49
| 2020-01-04T01:52:57
|
Python
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
from stellar_sdk import Server

# Example script: query the Stellar testnet Horizon server.
server = Server(horizon_url="https://horizon-testnet.stellar.org")
# Get a list of transactions that occurred in ledger 1400.
transactions = server.transactions().for_ledger(1400).call()
print(transactions)
# Get a list of transactions submitted by a particular account.
transactions = server.transactions() \
    .for_account(account_id="GASOCNHNNLYFNMDJYQ3XFMI7BYHIOCFW3GJEOWRPEGK2TDPGTG2E5EDW") \
    .call()
print(transactions)
|
[
"4catcode@gmail.com"
] |
4catcode@gmail.com
|
b6d49962e507d1202269880c14641540b5bffc8d
|
b22778ed4a21cc1102512ae7da7e8225b5f5299e
|
/examples/vector_v3.py
|
b5c655b305449e8e06758c7fe3ff2eeb22089f93
|
[
"MIT"
] |
permissive
|
afcarl/pythonic-api
|
4722358935075878ff91a640174a2e5d0ae5764d
|
764cb9dba9418c591d6d0cef20401b58d8ce0b1b
|
refs/heads/master
| 2020-03-18T16:41:40.297453
| 2016-07-30T19:45:41
| 2016-07-30T19:45:41
| 134,980,879
| 1
| 0
| null | 2018-05-26T17:18:08
| 2018-05-26T17:18:08
| null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
"""
A multi-dimensional ``Vector`` class, take 3
"""
from array import array
import math
import reprlib
import numbers
class Vector:
    """A multi-dimensional vector backed by a flat ``array('d')``.

    Supports len(), iteration, abs() (Euclidean norm), ==, str/repr,
    slicing (returns a Vector), integer indexing (returns a float), and
    multiplication by a real scalar on either side.
    """
    typecode = 'd'

    def __init__(self, components):
        """Build from any iterable of numbers; values are stored as doubles."""
        self._components = array(self.typecode, components)

    def __len__(self):
        return len(self._components)

    def __iter__(self):
        return iter(self._components)

    def __abs__(self):
        """Euclidean (L2) norm."""
        return math.sqrt(sum(x * x for x in self))

    def __eq__(self, other):
        # Length check first so zip() cannot silently truncate.
        return (len(self) == len(other) and
                all(a == b for a, b in zip(self, other)))

    def __str__(self):
        return str(tuple(self))

    def __repr__(self):
        # reprlib abbreviates very long component lists with '...'.
        components = reprlib.repr(self._components)
        components = components[components.find('['):-1]
        return 'Vector({})'.format(components)

    def __getitem__(self, index):
        """Slices return a new Vector; integer indices return a float."""
        cls = type(self)
        if isinstance(index, slice):
            return cls(self._components[index])
        elif isinstance(index, numbers.Integral):
            return self._components[index]
        else:
            msg = '{cls.__name__} indices must be integers'
            raise TypeError(msg.format(cls=cls))

    def __mul__(self, scalar):
        """Scalar multiplication: Vector * real -> new Vector."""
        if isinstance(scalar, numbers.Real):
            return Vector(n * scalar for n in self)
        else:
            return NotImplemented

    def __rmul__(self, scalar):
        """Fix: support real * Vector as well — without this, ``2 * v``
        raised TypeError while ``v * 2`` worked."""
        return self * scalar
|
[
"luciano@ramalho.org"
] |
luciano@ramalho.org
|
daebceb3498025be0ba64616015b483e5246c793
|
3c54f853a782e07675b809cada049debe3d415b1
|
/main/rates/management/commands/get_rate_data.py
|
a5e02efdf4c86d7cd7a4d9b3c79e36f983ab86be
|
[
"MIT"
] |
permissive
|
Hawk94/coin_tracker
|
ebf82a17aff1ae84aa7de872734dbf1616022de5
|
082909e17308a8dd460225c1b035751d12a27106
|
refs/heads/master
| 2021-01-24T08:12:37.041745
| 2017-08-10T11:01:19
| 2017-08-10T11:01:19
| 93,378,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from main.rates.models import Rate
import requests
import datetime
import decimal
class Command(BaseCommand):
    """Management command: fetch today's USD-based FX rates and store them.

    Queries the Open Exchange Rates API and records the EUR and GBP rates
    (inverted to USD-per-unit) as a new ``Rate`` row dated today.
    """

    help = 'Gets todays BTC price and saves it to the database'

    def handle(self, *args, **options):
        # The API returns rates quoted as units-per-USD; invert to get
        # the USD price of one EUR / one GBP.
        url = 'https://openexchangerates.org/api/latest.json?app_id={}'.format(
            settings.OPEN_EXCHANGE_APP_ID)
        rates = requests.get(url).json()['rates']
        Rate.objects.create(
            date=datetime.date.today(),
            eur_rate=1 / rates['EUR'],
            gbp_rate=1 / rates['GBP'],
        )
        self.stdout.write(self.style.SUCCESS('Successfully created exchange rate records!'))
|
[
"x99tom.miller@gmail.com"
] |
x99tom.miller@gmail.com
|
123ff2163d0dea2759b84eef2ebe3fab6a5fdbff
|
7cc53a80f8ca9716e2e6893b6fd98ddab326061c
|
/iHome/web_html.py
|
453a18b89d1a6db8eec578debdba818165338f0e
|
[] |
no_license
|
zengsiquan/ihome
|
4b98c476fdf381ad18113b070a44e48432b51a58
|
40926f74d46bc76de4aecd98cfc52302ecf72f1b
|
refs/heads/master
| 2020-03-10T00:55:06.484107
| 2018-04-10T13:22:52
| 2018-04-10T13:22:52
| 129,093,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# -*- coding:utf-8 -*-
from flask import Blueprint,current_app,make_response
from flask_wtf import csrf
html = Blueprint('html', __name__)


@html.route('/<re(".*"):file_name>')
def get_html(file_name):
    """Serve a static HTML page and attach a fresh CSRF token cookie."""
    # An empty path means the site root: serve the index page.
    page = file_name or 'index.html'
    # Everything except the favicon lives under static/html/.
    if page != "favicon.ico":
        page = 'html/' + page
    response = make_response(current_app.send_static_file(page))
    response.set_cookie('csrf_token', csrf.generate_csrf())
    return response
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
718743c7da0e3030a59f7358c4988be1a2d87356
|
534570bbb873293bd2646a1567b63d162fbba13c
|
/Python/Data Structure/Binary Tree/Serilization:Deserialization/Verify Preorder Serialization of a Binary Tree.py
|
9a2274a297c9e044d6cc6fe2a8830f27f34a8bea
|
[] |
no_license
|
XinheLIU/Coding-Interview
|
fa3df0f7167fb1bc6c8831748249ebaa6f164552
|
d6034c567cef252cfafca697aa316c7ad4e7d128
|
refs/heads/master
| 2022-09-17T14:30:54.371370
| 2022-08-19T15:53:35
| 2022-08-19T15:53:35
| 146,382,499
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
class Solution:
    def isValidSerialization(self, preorder: str) -> bool:
        """Check whether *preorder* (comma-separated, '#' = null) is a valid
        preorder serialization of a binary tree.

        Works by stack reduction: whenever the stack ends in
        ``value, '#', '#'`` that completed subtree collapses to a single
        ``'#'``.  A valid serialization reduces to exactly ``['#']``.
        """
        nodes = []
        for token in preorder.split(','):
            nodes.append(token)
            # Repeatedly fold a finished subtree "x, #, #" into "#".
            while self.endsWithTwoHashes(nodes, len(nodes) - 1):
                del nodes[-2:]          # drop the two null markers
                if not nodes:
                    return False        # the '#'s had no parent node
                nodes[-1] = '#'         # the parent subtree is now complete
        return nodes == ['#']

    def endsWithTwoHashes(self, stack, top):
        """Return True when positions *top* and *top*-1 both hold '#'."""
        if top < 1:
            return False
        return stack[top] == '#' and stack[top - 1] == '#'
|
[
"LIUXinhe@outlook.com"
] |
LIUXinhe@outlook.com
|
1cae18ce8bce6554011b5d4dd4091266f3224738
|
4d9b7b5f12b343e515609b063bdf5c31fe89a4f9
|
/asynchttp/websocket.py
|
ad23d2e17d1975f029217e182f7e79048da1b7f1
|
[
"BSD-3-Clause"
] |
permissive
|
oohlaf/asynchttp
|
6aa956695dd82a60854d98afbf09741ce5c1fee9
|
2fb6a3b321c130e7b87cf1de03f042b89579a702
|
refs/heads/master
| 2021-01-16T19:47:18.929253
| 2013-10-10T23:36:13
| 2013-10-10T23:36:13
| 13,546,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,484
|
py
|
"""WebSocket protocol versions 13 and 8."""
__all__ = ['WebSocketParser', 'WebSocketWriter', 'do_handshake',
'Message', 'WebSocketError',
'MSG_TEXT', 'MSG_BINARY', 'MSG_CLOSE', 'MSG_PING', 'MSG_PONG']
import base64
import binascii
import collections
import hashlib
import struct
from asynchttp import errors
# Frame opcodes defined in the spec.
OPCODE_CONTINUATION = 0x0
MSG_TEXT = OPCODE_TEXT = 0x1
MSG_BINARY = OPCODE_BINARY = 0x2
MSG_CLOSE = OPCODE_CLOSE = 0x8
MSG_PING = OPCODE_PING = 0x9
MSG_PONG = OPCODE_PONG = 0xa

# Fixed GUID appended to the client's key when computing the
# Sec-WebSocket-Accept token during the handshake (RFC 6455, sec. 1.3).
WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'

# The only request headers the handshake logic inspects.
WS_HDRS = ('UPGRADE', 'CONNECTION',
           'SEC-WEBSOCKET-VERSION', 'SEC-WEBSOCKET-KEY')

# tp: message type (one of the MSG_* constants); data: payload
# (str for text, bytes for binary, close code for close frames);
# extra: auxiliary data (e.g. the close reason message).
Message = collections.namedtuple('Message', ['tp', 'data', 'extra'])


class WebSocketError(Exception):
    """WebSocket protocol parser error."""
def WebSocketParser(out, buf):
    """Parser coroutine: read WebSocket messages from *buf* into *out*.

    Feeds each parsed ``Message`` to ``out.feed_data`` and signals EOF on
    *out* (then stops) once a close message is seen.
    """
    while True:
        message = yield from parse_message(buf)
        out.feed_data(message)
        # A close frame ends the message stream.
        if message.tp == MSG_CLOSE:
            out.feed_eof()
            break
def parse_frame(buf):
    """Return the next frame from the socket.

    Coroutine: reads from *buf* and returns a ``(fin, opcode, payload)``
    tuple, unmasking the payload when the client set the mask bit.
    Raises WebSocketError on protocol violations.
    """
    # read header
    data = yield from buf.read(2)
    first_byte, second_byte = struct.unpack('!BB', data)

    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf

    # frame-fin = %x0 ; more frames of this message follow
    #           / %x1 ; final frame of this message
    # frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
    # frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
    # frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
    if rsv1 or rsv2 or rsv3:
        raise WebSocketError('Received frame with non-zero reserved bits')

    # Control frames (opcode > 0x7) may never be fragmented.
    if opcode > 0x7 and fin == 0:
        raise WebSocketError('Received fragmented control frame')

    # NOTE(review): this rejects *every* non-final continuation frame, so a
    # message fragmented into 3+ frames would raise here even though
    # parse_message expects middle fragments with fin == 0 — verify intent.
    if fin == 0 and opcode == OPCODE_CONTINUATION:
        raise WebSocketError(
            'Received new fragment frame with non-zero opcode')

    has_mask = (second_byte >> 7) & 1
    length = (second_byte) & 0x7f

    # Control frames MUST have a payload length of 125 bytes or less
    if opcode > 0x7 and length > 125:
        raise WebSocketError(
            "Control frame payload cannot be larger than 125 bytes")

    # read payload
    # 126 => 16-bit extended length follows; 127 => 64-bit length follows.
    if length == 126:
        data = yield from buf.read(2)
        length = struct.unpack_from('!H', data)[0]
    elif length > 126:
        data = yield from buf.read(8)
        length = struct.unpack_from('!Q', data)[0]

    if has_mask:
        mask = yield from buf.read(4)

    if length:
        payload = yield from buf.read(length)
    else:
        payload = b''

    if has_mask:
        # Client-to-server payloads are XOR-masked with the 4-byte key.
        payload = bytes(b ^ mask[i % 4] for i, b in enumerate(payload))

    return fin, opcode, payload
def parse_message(buf):
    """Read one complete WebSocket message from *buf*.

    Coroutine: reassembles fragmented text/binary messages into a single
    ``Message`` namedtuple; close/ping/pong control frames are returned
    as single-frame messages.  Raises WebSocketError on protocol
    violations (malformed close frames, unexpected opcodes).
    """
    fin, opcode, payload = yield from parse_frame(buf)

    if opcode == OPCODE_CLOSE:
        if len(payload) >= 2:
            # The first two payload bytes carry the close status code.
            close_code = struct.unpack('!H', payload[:2])[0]
            close_message = payload[2:]
            return Message(OPCODE_CLOSE, close_code, close_message)
        elif payload:
            # A 1-byte close payload is malformed (RFC 6455, sec. 5.5.1).
            raise WebSocketError(
                'Invalid close frame: {} {} {!r}'.format(fin, opcode, payload))
        return Message(OPCODE_CLOSE, '', '')

    elif opcode == OPCODE_PING:
        return Message(OPCODE_PING, '', '')

    elif opcode == OPCODE_PONG:
        return Message(OPCODE_PONG, '', '')

    elif opcode not in (OPCODE_TEXT, OPCODE_BINARY):
        raise WebSocketError("Unexpected opcode={!r}".format(opcode))

    # Data frame: keep collecting continuation frames until FIN is set.
    data = [payload]
    while not fin:
        fin, _opcode, payload = yield from parse_frame(buf)
        if _opcode != OPCODE_CONTINUATION:
            # FIX: report the offending continuation frame's opcode
            # (_opcode); previously this formatted the initial frame's
            # opcode, producing a misleading error message.
            raise WebSocketError(
                'The opcode in non-fin frame is expected '
                'to be zero, got {!r}'.format(_opcode))
        else:
            data.append(payload)

    if opcode == OPCODE_TEXT:
        return Message(OPCODE_TEXT, b''.join(data).decode('utf-8'), '')
    else:
        return Message(OPCODE_BINARY, b''.join(data), '')
class WebSocketWriter:
    """Serialises outgoing WebSocket frames onto a transport."""

    def __init__(self, transport):
        self.transport = transport

    def _send_frame(self, message, opcode):
        """Write a single unmasked frame carrying *message* as its payload."""
        # First byte: FIN bit (0x80) plus the frame opcode.
        frame = bytearray([0x80 | opcode])
        size = len(message)
        # Payload length field: 1 byte, or 126 + 16-bit, or 127 + 64-bit.
        if size < 126:
            frame.append(size)
        elif size < (1 << 16):
            frame.append(126)
            frame += struct.pack('!H', size)
        else:
            frame.append(127)
            frame += struct.pack('!Q', size)
        frame += message
        self.transport.write(bytes(frame))

    def pong(self):
        """Send a pong control frame."""
        self._send_frame(b'', OPCODE_PONG)

    def ping(self):
        """Send a ping control frame."""
        self._send_frame(b'', OPCODE_PING)

    def send(self, message, binary=False):
        """Send *message* as a text (default) or binary data frame."""
        payload = message.encode('utf-8') if isinstance(message, str) else message
        self._send_frame(payload, OPCODE_BINARY if binary else OPCODE_TEXT)

    def close(self, code=1000, message=b''):
        """Send a close frame with the given status *code* and *message*."""
        if isinstance(message, str):
            message = message.encode('utf-8')
        payload = struct.pack('!H%ds' % len(message), code, message)
        self._send_frame(payload, opcode=OPCODE_CLOSE)
def do_handshake(method, headers, transport):
    """Validate a client's WebSocket handshake and build the server reply.

    Performs no I/O.  Returns a ``(status, headers, parser, writer)``
    tuple on success; raises the appropriate HTTP error exception when
    the request is not a valid WebSocket upgrade.
    """
    # The WebSocket upgrade is only defined for GET requests.
    if method.upper() != 'GET':
        raise errors.HttpErrorException(405, headers=(('Allow', 'GET'),))

    # Keep only the headers the handshake cares about.
    headers = {hdr: val for hdr, val in headers if hdr in WS_HDRS}

    if 'websocket' != headers.get('UPGRADE', '').lower().strip():
        raise errors.BadRequestException(
            'No WebSocket UPGRADE hdr: {}\n'
            'Can "Upgrade" only to "WebSocket".'.format(
                headers.get('UPGRADE')))

    if 'upgrade' not in headers.get('CONNECTION', '').lower():
        raise errors.BadRequestException(
            'No CONNECTION upgrade hdr: {}'.format(
                headers.get('CONNECTION')))

    # check supported version: RFC 6455 final ('13') and two drafts.
    version = headers.get('SEC-WEBSOCKET-VERSION')
    if version not in ('13', '8', '7'):
        raise errors.BadRequestException(
            'Unsupported version: {}'.format(version))

    # The client key must decode to exactly 16 random bytes.
    key = headers.get('SEC-WEBSOCKET-KEY')
    try:
        if not key or len(base64.b64decode(key)) != 16:
            raise errors.BadRequestException(
                'Handshake error: {!r}'.format(key))
    except binascii.Error:
        raise errors.BadRequestException(
            'Handshake error: {!r}'.format(key)) from None

    # Accept token: base64(SHA1(client key + fixed GUID)).
    accept = base64.b64encode(
        hashlib.sha1(key.encode() + WS_KEY).digest()).decode()

    # response code, headers, parser, writer
    return (101,
            (('UPGRADE', 'websocket'),
             ('CONNECTION', 'upgrade'),
             ('TRANSFER-ENCODING', 'chunked'),
             ('SEC-WEBSOCKET-ACCEPT', accept)),
            WebSocketParser,
            WebSocketWriter(transport))
|
[
"fafhrd91@gmail.com"
] |
fafhrd91@gmail.com
|
67624c3041be101cc92c160d6d7e7fd3442377f3
|
832f86e052d90916fb0c8156825c87dc13c0443e
|
/imported-from-gmail/2020-05-03-invert-a-binary-tree.py
|
3afada0e040cf5b62868c3542fa850c26f003171
|
[] |
no_license
|
johncornflake/dailyinterview
|
292615849cea62cb945ecc7039c594b6966a81f3
|
91bb0edb9e25255e6222279109c15ae9d203970c
|
refs/heads/master
| 2022-12-09T21:02:12.204755
| 2021-06-07T13:09:34
| 2021-06-07T13:09:34
| 225,059,833
| 0
| 0
| null | 2022-12-08T11:27:38
| 2019-11-30T19:24:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
Hi, here's your problem today. (You've reached the end of the problems for now - in the meanwhile, here is a random question. And visit
CoderPro
for more practice!) This problem was recently asked by Twitter:
You are given the root of a binary tree. Invert the binary tree in place. That is, all left children should become right children, and all right children should become left children.
Example:

        a
       / \
      b   c
     / \  /
    d   e f

The inverted version of this tree is as follows:

        a
       / \
      c   b
       \ / \
       f e  d
Here is the function signature:
class Node:
    """A binary-tree node holding a single value."""

    def __init__(self, value):
        self.left = None
        self.right = None
        self.value = value

    def preorder(self):
        """Print this subtree's values in pre-order, space-separated."""
        print(self.value, end=' ')
        if self.left:
            self.left.preorder()
        if self.right:
            self.right.preorder()


def invert(node):
    """Invert the tree rooted at *node* in place.

    Every left child becomes the right child and vice versa, recursively.
    Returns None; a None *node* is a no-op (empty subtree).
    """
    if node is None:
        return
    node.left, node.right = node.right, node.left
    invert(node.left)
    invert(node.right)


if __name__ == '__main__':
    # Demo tree from the problem statement: a(b(d, e), c(f, -)).
    root = Node('a')
    root.left = Node('b')
    root.right = Node('c')
    root.left.left = Node('d')
    root.left.right = Node('e')
    root.right.left = Node('f')

    root.preorder()   # a b d e c f
    print()
    invert(root)
    root.preorder()   # a c f b e d
    print()
|
[
"chadeous@gmail.com"
] |
chadeous@gmail.com
|
5e45efaf0a3d7732008c6b31f63ea03a3f44c0fe
|
d75359fde22b08a4109b30bb39c9db27961fa417
|
/loginpass/github.py
|
794d62418c269dca14a72d1f15a6c4568fd4dea5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
authlib/loginpass
|
58f0881b4e5975c305e633337d1b86657bea907b
|
635823a78a2a92cf8630f9935aebb9afcccb8656
|
refs/heads/master
| 2022-06-08T13:08:09.271879
| 2020-12-08T06:04:39
| 2020-12-08T06:04:39
| 128,506,236
| 280
| 95
|
BSD-3-Clause
| 2022-05-13T19:30:54
| 2018-04-07T07:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
"""
loginpass.github
~~~~~~~~~~~~~~~~
Loginpass Backend of GitHub (https://github.com).
Useful Links:
- Create App: https://github.com/settings/developers
- API documentation: https://developer.github.com/v3/
:copyright: (c) 2018 by Hsiaoming Yang
:license: BSD, see LICENSE for more details.
"""
from authlib.oidc.core import UserInfo
class GitHub(object):
    """Loginpass backend for GitHub OAuth2 login.

    Mixed into an OAuth client class by loginpass; the host class is
    expected to provide ``self.get`` for authenticated API requests.
    """

    NAME = 'github'
    OAUTH_CONFIG = {
        'api_base_url': 'https://api.github.com/',
        'access_token_url': 'https://github.com/login/oauth/access_token',
        'authorize_url': 'https://github.com/login/oauth/authorize',
        'client_kwargs': {'scope': 'user:email'},
        'userinfo_endpoint': 'https://api.github.com/user',
    }

    def userinfo(self, **kwargs):
        """Fetch the authenticated user's profile as an OIDC ``UserInfo``.

        Extra *kwargs* are forwarded to the underlying HTTP GET calls.
        """
        resp = self.get(self.OAUTH_CONFIG['userinfo_endpoint'], **kwargs)
        data = resp.json()
        params = {
            'sub': str(data['id']),
            'name': data['name'],
            'email': data.get('email'),
            'preferred_username': data['login'],
            'profile': data['html_url'],
            'picture': data['avatar_url'],
            'website': data.get('blog'),
        }
        # The email can be None despite the 'user:email' scope, because a
        # user may choose to keep their email address private.  In that
        # case fetch all of the user's emails (private ones included) and
        # use the one marked as `primary`.
        if params.get('email') is None:
            resp = self.get('user/emails', **kwargs)
            resp.raise_for_status()
            data = resp.json()
            params["email"] = next(email['email'] for email in data if email['primary'])
        return UserInfo(params)
|
[
"me@lepture.com"
] |
me@lepture.com
|
ee32eb4c5bf0fa3b9827e0a11a8b943fbb9b709d
|
5c5b53b686cd11b76772768096d096b1f2e0636b
|
/codingtest/findCityBJ18352.py
|
d3ad4ff983e5fb5e1aeb7f0e0ff5922c2ea6daa9
|
[] |
no_license
|
jewerlykim/python_Algorithm
|
3e72fe9b145ff172ca76c9d59fd1be6246513ae5
|
e83b7e8fc58807ebf05f423d962ad9ce37100ada
|
refs/heads/main
| 2023-08-22T10:23:29.764080
| 2021-10-14T16:13:14
| 2021-10-14T16:13:14
| 322,784,608
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
import sys
from collections import deque

# BOJ 18352: find all cities at exactly `target_dist` hops from `start`.
# Input line 1: city count, road count, target distance, start city.
n_cities, n_roads, target_dist, start = map(int, sys.stdin.readline().split())

# Directed adjacency list; cities are numbered from 1 (index 0 unused).
adjacency = [[] for _ in range(n_cities + 1)]
seen = [False for _ in range(n_cities + 1)]
dist = [0 for _ in range(n_cities + 1)]

for _ in range(n_roads):
    src, dst = map(int, sys.stdin.readline().split())
    adjacency[src].append(dst)


def bfs(source):
    """Breadth-first search from *source*, filling ``dist`` with hop counts."""
    pending = deque()
    pending.append(source)
    seen[source] = True
    while pending:
        current = pending.popleft()
        for neighbour in adjacency[current]:
            if not seen[neighbour]:
                pending.append(neighbour)
                seen[neighbour] = True
                dist[neighbour] = dist[current] + 1


bfs(start)

# Print every city whose shortest distance equals the target; -1 if none.
found = False
for city, d in enumerate(dist):
    if d == target_dist:
        print(city)
        found = True
if not found:
    print(-1)
|
[
"jsjs21good@gmail.com"
] |
jsjs21good@gmail.com
|
0064386a9682b065d6f45192f430c384290a799a
|
c929632612012a436fdec27771a6a8716cdb24bf
|
/setup.py
|
047ed9b775f9449564700e84813c16890e9b8e7d
|
[] |
no_license
|
imclab/python-ucto
|
bae2f082068261b70bf71ea681b92bf6e2e6263b
|
a07e87f568cdb342b84f7b76e9f466d507567211
|
refs/heads/master
| 2021-01-14T10:30:27.892695
| 2016-03-10T14:49:58
| 2016-03-10T14:49:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
#!/usr/bin/env python
"""Build script for python-ucto, a Cython binding to the ucto tokeniser."""

from distutils.core import setup, Extension
from Cython.Distutils import build_ext
import glob
import os
import sys
from os.path import expanduser

HOMEDIR = expanduser("~")
VERSION = '0.3.0'

# Search paths for the ucto/folia headers and libraries, user-local first.
includedirs = [HOMEDIR + '/local/include/', '/usr/include/',
               '/usr/include/libxml2', '/usr/local/include/']
libdirs = [HOMEDIR + '/local/lib/', '/usr/lib', '/usr/local/lib']

# Inside a virtualenv, prefer its include/lib directories over the rest.
if 'VIRTUAL_ENV' in os.environ:
    includedirs.insert(0, os.environ['VIRTUAL_ENV'] + '/include')
    libdirs.insert(0, os.environ['VIRTUAL_ENV'] + '/lib')

# The Python 2 and Python 3 builds differ only in which wrapper module is
# compiled.  FIX: compare sys.version_info instead of the version *string*
# (lexicographic string comparison mis-orders e.g. "10.x" before "3.x"),
# and build the Extension once instead of duplicating the whole block.
if sys.version_info[0] < 3:
    wrapper = "ucto_wrapper2.pyx"
else:
    wrapper = "ucto_wrapper.pyx"

extensions = [Extension("ucto",
                        ["libfolia_classes.pxd", "ucto_classes.pxd", wrapper],
                        language='c++',
                        include_dirs=includedirs,
                        library_dirs=libdirs,
                        libraries=['ucto', 'folia'],
                        extra_compile_args=['--std=c++0x'],
                        )]

setup(
    name = 'python-ucto',
    version = VERSION,
    author = 'Maarten van Gompel',
    author_email = "proycon@anaproy.nl",
    description = ("This is a Python binding to the tokenizer Ucto. Tokenisation is one of the first step in almost any Natural Language Processing task, yet it is not always as trivial a task as it appears to be. This binding makes the power of the ucto tokeniser available to Python. Ucto itself is a regular-expression based, extensible, and advanced tokeniser written in C++ (https://languagemachines.github.io/ucto)."),
    license = "GPL",
    keywords = "tokenizer tokenization tokeniser tokenisation nlp computational_linguistics ucto",
    url = "https://github.com/proycon/python-ucto",
    ext_modules = extensions,
    cmdclass = {'build_ext': build_ext},
    requires=['ucto (>=0.8.4)'],
    install_requires=['Cython'],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Text Processing :: Linguistic",
        "Programming Language :: Cython",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Operating System :: POSIX",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    ],
)
|
[
"proycon@anaproy.nl"
] |
proycon@anaproy.nl
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.