blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cfbd00fd91d4aaf5d1b858f999bbb011bd8c64b4 | 48603962470b1858984342cc649b6b5376db4b9e | /user/admin.py | 4e46d6e667974c16ec527eda8660e9918a20b743 | [] | no_license | hacking-mango/hackerthon_team9_backend | 7589c0217652c61f033c292feb0f31e62db1328b | 779c010c743ae71f6ff6cb6222b1811606416079 | refs/heads/main | 2023-06-16T15:14:05.181076 | 2021-07-11T03:29:13 | 2021-07-11T03:29:13 | 382,746,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from django.contrib import admin
from .models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = ("nickname", "email", "profile_image", "created_at", "position")
list_display_links = (
"nickname",
"email",
)
| [
"tjdntjr123@gmail.com"
] | tjdntjr123@gmail.com |
1ef30fbace05797d49c902a596b908d39fd97ed2 | f889bc01147869459c0a516382e7b95221295a7b | /test/test_body_64.py | d35630adc76fa2b92c13394be8a3dbecbb91f624 | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.body_64 import Body64
class TestBody64(unittest.TestCase):
""" Body64 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testBody64(self):
"""
Test Body64
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.body_64.Body64()
pass
if __name__ == '__main__':
unittest.main()
| [
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
60efe30010d22cb52db0a4c4d10f599a859c97e4 | bcf85b8dc5aa1c98af4a61c0404b0be7563ebfee | /ecommerc2-API/src/orders/migrations/0005_auto_20170124_1611.py | ca9e35c278df75c8f47761c1a3bba7a3ef69730f | [] | no_license | karolius/ecommerc2-api | 39bdf662d54cb4be8db27d346efd27a7fbc8be84 | efaf0cc5b7251e1b55fc08f68d7fab00974d25e7 | refs/heads/master | 2021-01-11T09:10:17.455407 | 2017-02-16T23:45:16 | 2017-02-16T23:45:16 | 81,380,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-24 15:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0004_order'),
]
operations = [
migrations.AlterField(
model_name='order',
name='billing_address',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='billing_address', to='orders.UserAddress'),
),
migrations.AlterField(
model_name='order',
name='shipping_address',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='orders.UserAddress'),
),
migrations.AlterField(
model_name='order',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='orders.UserCheckout'),
),
]
| [
"karolius127@gmail.com"
] | karolius127@gmail.com |
f353000de3bc8694b405d19bd1acbf833e3fe217 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /115_testing/_exercises/_templates/temp/Github/_Level_2/python_test_api-master/test_update_issue.py | 34fc6833a45bee855026e38aae7fa03bdf8eda83 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,365 | py | ____ base_api ______ BaseApi
______ xmltodict
______ d_t_
______ requests
c_ TestUpdateIssue(BaseApi
___ test_update_issue
issue_id _ _create_issue() # obviously we need to create an issue before updating it
url _ base_url + '/issue/' + issue_id
current_time _ st.(d_t_.d_t_.now())[0:-7]
issue_summary _ 'Summary updated at ' + current_time
issue_description _ 'Description updated at ' + current_time
params _ {
'summary': issue_summary,
'description': issue_description
}
r _ request(url, 'post', params)
assert_for_status_code_and_content_type(r, 200, 'text/plain;charset=UTF-8')
url _ base_url + '/issue/' + issue_id
r _ request(url, 'get')
response_dict _ xmltodict.parse(r.text)
assert_for_status_code_and_content_type(r, 200)
aE..(response_dict['issue']['@id'], issue_id)
___ field __ response_dict['issue']['field']:
__ field['@name'] __ 'summary':
aE..(field['value'], issue_summary)
__ field['@name'] __ 'description':
aE..(field['value'], issue_description)
___ test_update_not_existing_issue
url _ base_url + '/issue/' + 'kjhfkaskafk'
current_time _ st.(d_t_.d_t_.now())[0:-7]
issue_summary _ 'Summary updated at ' + current_time
issue_description _ 'Description updated at ' + current_time
params _ {
'summary': issue_summary,
'description': issue_description
}
r _ request(url, 'post', params)
response_dict _ xmltodict.parse(r.text)
assert_for_status_code_and_content_type(r, 404)
aT..(response_dict['error'])
___ test_update_issue_without_credentials
issue_id _ _create_issue()
url _ base_url + '/issue/' + issue_id
current_time _ st.(d_t_.d_t_.now())[0:-7]
issue_summary _ 'Summary updated at ' + current_time
issue_description _ 'Description updated at ' + current_time
params _ {
'summary': issue_summary,
'description': issue_description
}
r _ requests.post(url, params)
assert_for_status_code_and_content_type(r, 401)
response_dict _ xmltodict.parse(r.text)
aT..(response_dict['error'])
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
b7b54593d0b42f343c2a103add3226a69f8d758e | 1b2369715f47c9276f3dd458541d0b62cf5ba237 | /models.py | 1014224267a3e98c3240a25bef788a2aecbf1fa0 | [] | no_license | Virucek/gb_framework | 5a68cdf4f09867db3704ec589e937ddbe68b27f0 | 50893554c80583243ed301ab52e4bc46875ad241 | refs/heads/main | 2023-02-13T14:01:57.808400 | 2021-01-04T22:20:07 | 2021-01-04T22:20:07 | 319,729,864 | 0 | 0 | null | 2021-01-04T22:20:20 | 2020-12-08T18:44:10 | Python | UTF-8 | Python | false | false | 5,503 | py | """ Модели проекта """
import json
from patterns.behavioral.observer import Observer, Subject
from patterns.creational.prototype import PrototypeMixin
from patterns.orm.unit_of_work import DomainObject
class User:
def __init__(self, name):
self.name = name
class Student(User, DomainObject):
def __init__(self, name):
self.courses = []
super(Student, self).__init__(name)
class Teacher(User, DomainObject):
pass
class UserFactory:
types = {
'student': Student,
'teacher': Teacher,
}
@classmethod
def create(cls, type_name, name):
return cls.types[type_name](name)
# Категории Курсов
class Category(DomainObject):
# id_ = 0
def __init__(self, name, parent_category=None):
# self.id = Category.id_
# Category.id_ += 1
self.name = name
self.courses = []
self.parent_category = parent_category
self.child_categories = []
def __getitem__(self, item):
return self.courses[item]
@property
def course_count(self):
res = len(self.courses)
if self.child_categories:
for cat_ in self.child_categories:
res += cat_.course_count
return res
def add_child(self, category):
self.child_categories.append(category)
# Курсы
class Course(PrototypeMixin, Subject):
def __init__(self, name, category):
self.name = name
self.category = category
self.category.courses.append(self)
self.students = []
super().__init__()
def __getitem__(self, item):
return self.students[item]
def add_student(self, student):
self.students.append(student)
student.courses.append(self)
self._subject_state = student
self._notify()
return CourseStudent(self, student)
@property
def new_student(self):
return self._subject_state
class OnlineCourse(Course, DomainObject):
pass
class OfflineCourse(Course, DomainObject):
pass
class CourseFactory:
types = {
'online': OnlineCourse,
'offline': OfflineCourse,
}
@classmethod
def create(cls, type_name, name, category):
return cls.types[type_name](name, category)
class SmsNotifier(Observer):
def update(self, subject):
print(f'SMS: студент {subject.new_student.name} присоединился к курсу {subject.name}')
class EmailNotifier(Observer):
def update(self, subject):
print(f'EMAIL: студент {subject.new_student.name} присоединился к курсу {subject.name}')
class CourseStudent(DomainObject): # Курс - студент, связь многие ко многим
def __init__(self, course, student):
self.course = course
self.student = student
class BaseSerializer:
def __init__(self, object):
self.object = object
def save(self):
try:
return json.dumps(self.object)
except TypeError as e:
print(f'Problem trying to serialize object to json:\n {e}')
def load(self):
try:
return json.loads(self.object)
except json.JSONDecodeError as e:
print(f'Problem trying to deserialize object from json:\n {e}')
# Основной интерфейс
class MainInterface:
def __init__(self):
self.teachers = []
self.students = []
self.courses = []
self.categories = []
@staticmethod
def create_user(type_name, name):
return UserFactory.create(type_name, name)
@staticmethod
def create_category(name, parent_category=None):
category = Category(name, parent_category)
if parent_category is not None:
parent_category.add_child(category)
return category
@staticmethod
def create_course(type_name, name, category):
return CourseFactory.create(type_name, name, category)
def get_category_by_id(self, category_id):
for cat in self.categories:
if cat.id == category_id:
return cat
raise Exception(f'Категория с id {id} отсутствует')
def get_courses_by_category(self, category_id):
try:
category_id = int(category_id)
except ValueError:
print('Category id должен быть числом!')
else:
category = self.get_category_by_id(category_id)
return category.courses
@staticmethod
def get_course_types():
return list(CourseFactory.types.keys())
def get_course_by_name(self, name):
for course in self.courses:
if course.name == name:
return course
raise Exception(f'Курс с именем {name} отсутствует')
def get_category_tree(self):
categories_list = []
if self.categories:
for cat in self.categories:
if cat.parent_category is None:
categories_list.append(cat)
return categories_list
def get_students_by_course(self, course):
course = self.get_course_by_name(course)
return course.students
def get_student_by_name(self, name):
for student in self.students:
if student.name == name:
return student
raise Exception(f'Студент с именем {name} отсутствует')
| [
"aykin.yakov@gmail.com"
] | aykin.yakov@gmail.com |
3b41186613bbe1df0198661313cd981b26eee414 | 8531bee5e42d82ffc09ef274ccb2333ca02f03cc | /python/boj/2566.py | 4a6668ddf97b156af575a8387cf04a1e04a2ed03 | [] | no_license | ChoHyoungSeo/Algorithm_prac | 969f193f9ba349d0df3c1d7645fe3f42ec7581c4 | 02327623b5ea2211f4618e60b0bdcc61e16e1c5a | refs/heads/master | 2023-08-17T11:26:55.159895 | 2023-08-17T10:52:34 | 2023-08-17T10:52:34 | 210,917,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | ans = 0
ans_idx01 = 0
ans_idx02 = 0
for i in range(9):
tmp = list(map(int, input().split()))
if max(tmp) > ans:
ans_idx01 = i
ans_idx02 = tmp.index(max(tmp))
ans = max(tmp)
print(ans)
print(ans_idx01 + 1, ans_idx02+1) | [
"francesco.johs@gmail.com"
] | francesco.johs@gmail.com |
de8b7b31daba8b0d65d76e9630be8f028152e974 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/mnurolcay/2011/util/misc/q4wine/actions.py | 461a1ecbe7b16a54ce9395b37efa96f01b7fe94b | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import cmaketools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
WorkDir="q4wine-%s" % get.srcVERSION().replace("_0", "-r")
def setup():
cmaketools.configure("-DWITH_ICOUTILS=ON \
-DWITH_WINETRIKS=ON \
-DWITH_WINEAPPDB=ON \
-DWITH_DBUS=ON")
def build():
cmaketools.make()
def install():
cmaketools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("ChangeLog", "copying", "LICENSE", "README")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
bc99649fa3b238fc810e29b01a44612be761c05b | 7ebc4e9ade9c0c0312c87d74f94929e5c3bf96a6 | /aaai/compute-results-transfer-pca.py | 647d34f500f58a76c7c7f9d996bed06f9acb9606 | [] | no_license | nipunbatra/transferable-energy-breakdown-old | 7fa02dd84b0eb37875c190c5a06bfc9d1a2a9218 | bc12de92d620d33e1ca4cf841af341eb3d4bcd76 | refs/heads/master | 2021-08-22T09:24:40.322431 | 2017-11-29T21:18:43 | 2017-11-29T21:18:43 | 78,712,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | from aaai18.common import compute_rmse_fraction
from create_matrix import *
appliance_index = {appliance: APPLIANCES_ORDER.index(appliance) for appliance in APPLIANCES_ORDER}
import os
import pickle
source, target = sys.argv[1:]
cost = 'l21'
out = {}
for static_fac in ['None']:
out[static_fac] = {}
for lam in [0]:
out[static_fac][lam] = {}
for train_percentage in range(10, 110, 10):
out[static_fac][lam][train_percentage] ={}
for random_seed in range(5):
out[static_fac][lam][train_percentage][random_seed] = {}
name = "{}-{}-{}-{}-{}-{}-{}".format(source, target, static_fac, lam, random_seed, train_percentage,
cost)
directory = os.path.expanduser('~/aaai2017/pca-transfer_{}_{}_{}/'.format(source, target, cost))
filename = os.path.join(directory, name + '.pkl')
try:
pr = pickle.load(open(filename, 'r'))
pred = pr['Predictions']
for appliance in APPLIANCES_ORDER[1:]:
prediction = pred[appliance]
if appliance == "hvac":
prediction = prediction[range(4, 10)]
out[static_fac][lam][train_percentage][random_seed][appliance]= \
compute_rmse_fraction(appliance, prediction, target)[2]
print("Computed for: {}".format(name))
except Exception, e:
print(e)
print("Exception")
out[static_fac][lam][train_percentage] = pd.DataFrame(out[static_fac][lam][train_percentage]).mean(axis=1)
pickle.dump(out, open("predictions/pca-{}-{}-transfer-cv.pkl".format(source, target),"w"))
| [
"nipunb@iiitd.ac.in"
] | nipunb@iiitd.ac.in |
6605bf10714ea0486e0502e4fc5f35a2777c12f3 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/MerchantBrandListResult.py | eae81a409220a5d4ea5ff4cfe0ca4568824e6cec | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,655 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BrandResult import BrandResult
class MerchantBrandListResult(object):
def __init__(self):
self._brand_list_result = None
@property
def brand_list_result(self):
return self._brand_list_result
@brand_list_result.setter
def brand_list_result(self, value):
if isinstance(value, list):
self._brand_list_result = list()
for i in value:
if isinstance(i, BrandResult):
self._brand_list_result.append(i)
else:
self._brand_list_result.append(BrandResult.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.brand_list_result:
if isinstance(self.brand_list_result, list):
for i in range(0, len(self.brand_list_result)):
element = self.brand_list_result[i]
if hasattr(element, 'to_alipay_dict'):
self.brand_list_result[i] = element.to_alipay_dict()
if hasattr(self.brand_list_result, 'to_alipay_dict'):
params['brand_list_result'] = self.brand_list_result.to_alipay_dict()
else:
params['brand_list_result'] = self.brand_list_result
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MerchantBrandListResult()
if 'brand_list_result' in d:
o.brand_list_result = d['brand_list_result']
return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
f84d21bcb1c3dcf50eec8e83c47db0eb90eada0f | e77b92df446f0afed18a923846944b5fd3596bf9 | /Inflearn_algo/section4_BinarySearch/pro3_Music_BS.py | 78e5a18de62527a544f7fb265a1f3d0e25738669 | [] | no_license | sds1vrk/Algo_Study | e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e | fbbc21bb06bb5dc08927b899ddc20e6cde9f0319 | refs/heads/main | 2023-06-27T05:49:15.351644 | 2021-08-01T12:43:06 | 2021-08-01T12:43:06 | 356,512,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | # 뮤직비디오 (결정 알고리즘)
# M개 이하로 짜르기
import sys
sys.stdin=open("input.txt","r")
n,m=map(int,input().split())
a=list(map(int,input().split()))
# 결정 알고리즘, lt와 rt정하기
lt=1
rt=sum(a)
res=0
def count(mid):
# cnt는 DVD의 저장은 일단 1개
cnt=1
# DVD에 저장되는 노래들의 합
hap=0
for i in a:
hap+=i
if hap>mid:
# 용량이 초과되서 새로운 DVD가 필요
cnt+=1
# 그리고 첫번째 곡으로 i가 들어감
hap=i
return cnt
maxx=max(a)
while lt<=rt:
# 자를수 있는 중간값 찾기
mid=(lt+rt)//2
# 만약 count함수에 들어가서 나온 값이 찾을려는 m값 이하이면 정답이 되기에 이것을 mid값에 넣고
# 3개이하로 만들수 있으면 2,3개 가능하기에 이하로 넣는다.
# 그리고 최소값을 찾기 위해서 rt값을 줄인다
# rt값을 줄일수록 mid 값이 작아지기 떄문에
# 추가적으로 mid>=maxx 를 해준 이유는 123456789 9 9 라고 했을경우 9분 보다 큰것은 따로 들어가야 되므로 mid>=maxx라는 조건을 써준다.
# mid는 capcity
if mid>=maxx and count(mid)<=m:
res=mid
rt=mid-1
else :
# 나온값이 4,5,6 이러면 lt를 늘려서 더 작게 만들어야 됨
lt=mid+1
print(res) | [
"51287886+sds1vrk@users.noreply.github.com"
] | 51287886+sds1vrk@users.noreply.github.com |
0d007d7be2a8e9d963defe5cdede219ecbc18387 | 5d7ad4d331ee029101fe6a239db9d0e7eebedaae | /2BFS/71. Binary Tree Zigzag Level Order Traversal.py | b42fb0a663f8aa8b984b7c718f98932618353079 | [] | no_license | Iansdfg/9chap | 77587e1c99b34d13b49a86ed575ec89db421551e | d7b42f94c20f6a579ad59d9f894dcc2fbc5a0501 | refs/heads/master | 2022-11-15T02:21:33.791516 | 2022-11-09T06:24:43 | 2022-11-09T06:24:43 | 201,677,377 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | from lintcode import (
TreeNode,
)
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
from collections import deque
class Solution:
"""
@param root: A Tree
@return: A list of lists of integer include the zigzag level order traversal of its nodes' values.
"""
def zigzag_level_order(self, root):
# write your code here
if not root:
return []
queue = deque([root])
res = []
flag = 0
while queue:
lv = []
for i in range(len(queue)):
curr = queue.popleft()
lv.append(curr.val)
if curr.left:
queue.append(curr.left)
if curr.right:
queue.append(curr.right)
if flag%2 == 0:
res.append(lv)
else:
lv.reverse()
res.append(lv)
flag += 1
return res
| [
"noreply@github.com"
] | Iansdfg.noreply@github.com |
9eb8aa52e4354fd7124a2cbb45bc1af55175c0a1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03588/s218611558.py | 18c51109a42bf901002b08e02b1cd616be2108ed | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | N = int(input())
AB = [list(map(int,input().split())) for _ in range(N)]
AB = sorted(AB)
#print(AB)
ans = N + AB[-1][1]
for i in range(N):
if i == 0:
ans += AB[0][0] - 1
else:
ans += min(abs(AB[i][0]-AB[i-1][0])-1,abs(AB[i][1]-AB[i-1][1])-1)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5d2ca49923ba3a1183315f8fa2e3b16ee5fdc819 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/InnerDetector/InDetExample/InDetSLHC_Example/share/ExtrapolationEngineTest_ITk_jobOptions.py | 359c9700d941710604b7d03b1f4f850021f23f80 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,822 | py | ###############################################################
#
# Job options
#
# This is for validation of ITk tracking geometry.
# Modified version of Tracking/TrkExtrapolation/TrkExUnitTests/share/ExtrapolationEngineTest_jobOptions.py. (TrkExUnitTests-00-00-08)
# See https://twiki.cern.ch/twiki/bin/view/Atlas/UpgradeSimulationInnerTrackerMigrationRel20p3p3#Validation_of_database_files
#==============================================================
#--------------------------------------------------------------
# ATLAS default Application Configuration options
#--------------------------------------------------------------
# Use McEventSelector so we can run with AthenaMP
import AthenaCommon.AtlasUnixGeneratorJob
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
#--------------------------------------------------------------
# Geometry section
#--------------------------------------------------------------
from AthenaCommon.DetFlags import DetFlags
DetFlags.ID_setOn()
DetFlags.Calo_setOff()
DetFlags.Muon_setOff()
include("InDetSLHC_Example/preInclude.SLHC.py")
include("InDetSLHC_Example/preInclude.SiliconOnly.py")
# Full job is a list of algorithms
from AthenaCommon.AlgSequence import AlgSequence
job = AlgSequence()
# build GeoModel
if 'DetDescrVersion' not in dir():
DetDescrVersion = 'ATLAS-P2-ITK-01-00-00'
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetDescrVersion = DetDescrVersion
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from GeoModelSvc.GeoModelSvcConf import GeoModelSvc
GeoModelSvc = GeoModelSvc()
ServiceMgr += GeoModelSvc
GeoModelSvc.AtlasVersion = DetDescrVersion
from IOVDbSvc.CondDB import conddb
conddb.setGlobalTag('OFLCOND-SIM-00-00-00')
# switch the material loading off
from TrkDetDescrSvc.TrkDetDescrJobProperties import TrkDetFlags
TrkDetFlags.PixelBuildingOutputLevel = INFO
TrkDetFlags.SCT_BuildingOutputLevel = INFO
TrkDetFlags.TRT_BuildingOutputLevel = INFO
TrkDetFlags.ConfigurationOutputLevel = INFO
TrkDetFlags.TRT_BuildStrawLayers = True
TrkDetFlags.SLHC_Geometry = True
TrkDetFlags.MaterialDatabaseLocal = True
if TrkDetFlags.MaterialDatabaseLocal() is True :
TrkDetFlags.MaterialSource = 'COOL'
TrkDetFlags.MaterialVersion = 17
TrkDetFlags.MaterialSubVersion = ""
# splitGeo = DetDescrVersion.split('-')
# TrkDetFlags.MaterialMagicTag = splitGeo[0] + '-' + splitGeo[1] + '-' + splitGeo[2]
TrkDetFlags.MaterialMagicTag = DetDescrVersion
TrkDetFlags.MaterialStoreGateKey = '/GLOBAL/TrackingGeo/LayerMaterialITK'
TrkDetFlags.MaterialDatabaseLocalPath = './'
TrkDetFlags.MaterialDatabaseLocalName = 'AtlasLayerMaterial-'+DetDescrVersion+'.db'
# load the tracking geometry service
from TrkDetDescrSvc.AtlasTrackingGeometrySvc import AtlasTrackingGeometrySvc
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Number of events to be processed (default is until the end of
# input, or -1, however, since we have no input, a limit needs
# to be set explicitly, here, choose 10)
theApp.EvtMax = 1 # 100
ExToolOutputLevel = VERBOSE # INFO #
ExAlgorithmOutputLevel = INFO #
from AthenaCommon.AppMgr import ServiceMgr
# output level
ServiceMgr.MessageSvc.OutputLevel = INFO
# increase the number of letter reserved to the alg/tool name from 18 to 30
ServiceMgr.MessageSvc.Format = "% F%50W%S%7W%R%T %0W%M"
# to change the default limit on number of message
ServiceMgr.MessageSvc.defaultLimit = 9999999 # all messages
#--------------------------------------------------------------
# Tool setup
#--------------------------------------------------------------
# the magnetic field
from MagFieldServices import SetupField
from IOVDbSvc.CondDB import conddb
conddb.addOverride('/GLOBAL/BField/Map','BFieldMap-FullAsym-09-solTil3')
from TrkExEngine.AtlasExtrapolationEngine import AtlasExtrapolationEngine
ExtrapolationEninge = AtlasExtrapolationEngine(name='Extrapolation', nameprefix='Atlas', ToolOutputLevel=ExToolOutputLevel)
ToolSvc += ExtrapolationEninge
#--------------------------------------------------------------
# Algorithm setup
#--------------------------------------------------------------
# Add top algorithms to be run
from TrkExUnitTests.TrkExUnitTestsConf import Trk__ExtrapolationEngineTest
ExtrapolationEngineTest = Trk__ExtrapolationEngineTest('ExtrapolationEngineTest')
# parameters mode: 0 - neutral tracks, 1 - charged particles
ExtrapolationEngineTest.ParametersMode = 1
# do the full test backwards as well
ExtrapolationEngineTest.BackExtrapolation = False
# pT range for testing
ExtrapolationEngineTest.PtMin = 100000
ExtrapolationEngineTest.PtMax = 100000
# The test range in Eta
ExtrapolationEngineTest.EtaMin = -0.5
ExtrapolationEngineTest.EtaMax = 0.5
# Configure how you wanna run
ExtrapolationEngineTest.CollectSensitive = True
ExtrapolationEngineTest.CollectPassive = True
ExtrapolationEngineTest.CollectBoundary = True
# the path limit to test
ExtrapolationEngineTest.PathLimit = -1.
# give it the engine
ExtrapolationEngineTest.ExtrapolationEngine = ExtrapolationEninge
# output formatting
ExtrapolationEngineTest.OutputLevel = ExAlgorithmOutputLevel
job += ExtrapolationEngineTest # 1 alg, named 'ExtrapolationEngineTest'
#################################################################
theApp.Dlls += [ 'RootHistCnv' ]
theApp.HistogramPersistency = 'ROOT'
# --- load AuditorSvc
from AthenaCommon.ConfigurableDb import getConfigurable
# --- write out summary of the memory usage
# | number of events to be skip to detect memory leak
# | 20 is default. May need to be made larger for complete jobs.
ServiceMgr.AuditorSvc += getConfigurable('ChronoAuditor')()
# --- write out a short message upon entering or leaving each algorithm
#
theApp.AuditAlgorithms = True
theApp.AuditServices = True
#
# --- Display detailed size and timing statistics for writing and reading
ServiceMgr.AthenaPoolCnvSvc.UseDetailChronoStat = True
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
# add the G4 validation output stream
ServiceMgr.THistSvc.Output += [ "val DATAFILE='ExtrapolationEngineTest.root' TYPE='ROOT' OPT='RECREATE'" ]
include("InDetSLHC_Example/postInclude.SLHC_Setup.py")
#==============================================================
#
# End of job options file
#
###############################################################
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
fca0cc8ccedd00d95f5501264f237a8c3a36c9b9 | e8cac4db53b22a28f7421ede9089bd3d4df81c82 | /TaobaoSdk/Request/TraderateAddRequest.py | 05131877e37c9437578f810351d4d96396b6af27 | [] | no_license | wangyu0248/TaobaoOpenPythonSDK | af14e84e2bada920b1e9b75cb12d9c9a15a5a1bd | 814efaf6e681c6112976c58ec457c46d58bcc95f | refs/heads/master | 2021-01-19T05:29:07.234794 | 2012-06-21T09:31:27 | 2012-06-21T09:31:27 | 4,738,026 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,718 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 新增单个评价(<font color="red">注:在评价之前需要对订单成功的时间进行判定(end_time),如果超过15天,不能再通过该接口进行评价</font>)
# @author wuliang@maimiaotech.com
# @date 2012-06-21 17:17:38
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
class TraderateAddRequest(object):
    """Request for the TOP API method "taobao.traderate.add": posts a single
    trade rating.

    Per the service notes embedded in the original file, the order must have
    completed (end_time) within the last 15 days, otherwise the rating is
    rejected by the service.
    """

    def __init__(self):
        # BUGFIX: was `super(self.__class__, self).__init__()`, which recurses
        # infinitely if this class is ever subclassed; name the class directly.
        super(TraderateAddRequest, self).__init__()

        # TOP API method name.
        self.method = "taobao.traderate.add"

        # Request timestamp; defaults to "now" when not set explicitly.
        self.timestamp = int(time.time())

        # Optional Boolean: rate anonymously (sellers may not rate anonymously).
        # Any value other than true/false is treated as false by the service.
        self.anony = None

        # Optional String (max 500 chars): rating text. Required by the
        # service when `result` is "neutral"/"bad"; ignored for "good".
        self.content = None

        # Optional Number: sub-order id.
        self.oid = None

        # Required String: rating outcome - "good", "neutral" or "bad".
        self.result = None

        # Required String: rater role - "seller" or "buyer".
        self.role = None

        # Required Number: trade id.
        self.tid = None
| [
"liyangmin@maimiaotech.com"
] | liyangmin@maimiaotech.com |
595102c2c26d2c6c754214f9360bfce04c596dad | 95c3c587907ae38b11faacc4d2ebe1df8f5b3335 | /Aug-13-ASSGN-NUMBERS-Q1-Lakshmipriya.py | 848179e0f96b514e18ff3af2c050186eb3818b62 | [] | no_license | sandhyalethakula/Iprimed_16_python | d59cb47d2d2a63c04a658c8b302505efc8f24ff4 | c34202ca155819747a5c5ac4a8a5511d425f41a1 | refs/heads/main | 2023-08-16T11:40:11.005919 | 2021-09-29T13:22:32 | 2021-09-29T13:22:32 | 411,956,547 | 0 | 0 | null | 2021-09-30T06:57:13 | 2021-09-30T06:57:12 | null | UTF-8 | Python | false | false | 468 | py | '''1.Write a program that reads a positive integer, n, from the user and then displays the sum of all of the integers from 1 to n.
The sum of the first n positive integers can be computed using the formula: sum = (n)(n + 1) / 2 '''
print('-'*20)
n = int(input("Enter a positive integer: ")) #Read the input from the user
total = n * (n+1) / 2 #Calculate the sum
print("The sum of the first",n,"positive integers",total)#Display the result
print('-'*20)
| [
"noreply@github.com"
] | sandhyalethakula.noreply@github.com |
fe8c41c0d29dc660a9392cc21375dc8544892ae4 | cffc460605febc80e8bb7c417266bde1bd1988eb | /since2020/ZeroJudge/ZeroJudge e539.py | 8f69b1d47da09e1f2512a1162f47440133be1139 | [] | no_license | m80126colin/Judge | f79b2077f2bf67a3b176d073fcdf68a8583d5a2c | 56258ea977733e992b11f9e0cb74d630799ba274 | refs/heads/master | 2021-06-11T04:25:27.786735 | 2020-05-21T08:55:03 | 2020-05-21T08:55:03 | 19,424,030 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | '''
@judge ZeroJudge
@id e539
@name Circular
@source UVa 967
@tag Prime, Range Query
'''
from sys import stdin
from itertools import accumulate
from math import sqrt
# Sieve of Eratosthenes over [0, N): sieve[i] == 1 iff i is prime.
N = 1000000
sieve = [ 1 ] * N
sieve[0] = sieve[1] = 0
for p in range(2, int(sqrt(N) + 0.01) + 1):
    if sieve[p]:
        for multiple in range(p * p, N, p):
            sieve[multiple] = 0
def circle(n):
    """Return True if every rotation of n's decimal digits is prime."""
    doubled = str(n) * 2
    width = len(doubled) // 2
    # Each length-`width` window of the doubled string is one rotation of n.
    return all(sieve[int(doubled[i:i + width])] for i in range(width))
# dp[x] = count of circular primes in [0, x]; prefix sums give O(1) range queries.
dp = list(accumulate([ 1 if circle(x) else 0 for x in range(N) ]))
def solve(a, b):
    """Format the number of circular primes in the range [a, b]."""
    count = dp[b] - dp[a - 1]
    if count == 0:
        return 'No Circular Primes.'
    if count == 1:
        return '1 Circular Prime.'
    return f'{count} Circular Primes.'
# Each input line carries a query "a b"; a line holding just -1 is the
# judge's terminator and is simply skipped.
for raw in stdin:
    if raw.strip() != '-1':
        print(solve(*map(int, raw.split())))
"m80126colin@gmail.com"
] | m80126colin@gmail.com |
e147389a483460573bbdcdffcce154acfaca0467 | 835fe55f4ea82b4e92fc3a07336c61c9a4726a44 | /conans/server/rest/controllers/file_upload_download_controller.py | 20d28124339ceb95070a2db072e893f7b9a8686a | [
"MIT"
] | permissive | tru/conan | 0b1ed247b4cf4cb4f66dc5c302edabfb3589d37b | b9266be3cd026e4a8ea1262e557f4259ed36e9f1 | refs/heads/develop | 2021-08-27T18:22:19.614272 | 2016-06-07T17:34:23 | 2016-06-07T17:34:23 | 60,776,756 | 1 | 0 | null | 2017-02-07T22:28:10 | 2016-06-09T13:26:31 | Python | UTF-8 | Python | false | false | 2,813 | py | from conans.server.rest.controllers.controller import Controller
from bottle import request, static_file, FileUpload, cached_property
from conans.server.service.service import FileUploadDownloadService
import os
import sys
from unicodedata import normalize
import six
class FileUploadDownloadController(Controller):
    """
    Serve file download (GET) and upload (PUT) requests
    """
    def attach_to(self, app):
        # Register both routes on the bottle app; handlers close over `service`.
        storage_path = app.file_manager.paths.store
        service = FileUploadDownloadService(app.updown_auth_manager, storage_path)
        @app.route(self.route + '/<filepath:path>', method=["GET"])
        def get(filepath):
            # The authorization token travels as the "signature" query parameter.
            token = request.query.get("signature", None)
            file_path = service.get_file_path(filepath, token)
            # https://github.com/kennethreitz/requests/issues/1586
            mimetype = "x-gzip" if filepath.endswith(".tgz") else "auto"
            return static_file(os.path.basename(file_path),
                               root=os.path.dirname(file_path),
                               mimetype=mimetype)
        @app.route(self.route + '/<filepath:path>', method=["PUT"])
        def put(filepath):
            token = request.query.get("signature", None)
            # Wrap the request body so the original client filename is kept.
            file_saver = ConanFileUpload(request.body, None,
                                         filename=os.path.basename(filepath), headers=request.headers)
            abs_path = os.path.abspath(os.path.join(storage_path, os.path.normpath(filepath)))
            # Body is a stringIO (generator)
            service.put_file(file_saver, abs_path, token, request.content_length)
            return
class ConanFileUpload(FileUpload):
    """Variant of bottle.FileUpload that keeps the client filename mostly
    intact (no slug-style character stripping).
    FIXME: Review bottle.FileUpload and analyze possible security or general issues"""

    @cached_property
    def filename(self):
        """Client-side file name, ASCII-folded and capped at 255 characters.

        Accents are removed where possible; an empty name becomes 'empty'.
        Unlike bottle's implementation, dots, dashes and whitespace are kept.
        """
        name = self.raw_filename
        if six.PY2 and not isinstance(name, unicode):
            name = name.decode('utf8', 'ignore')
        name = normalize('NFKD', name).encode('ASCII', 'ignore').decode('ASCII')
        name = os.path.basename(name.replace('\\', os.path.sep))
        return name[:255] or 'empty'
| [
"lasote@gmail.com"
] | lasote@gmail.com |
f8e181fb1435b0ee9e82aadf9176c9719f5ac972 | 3a78046505ac496c51978ddcba0f33c6acbeeb98 | /meetings/forms.py | a3fb19ed97b1f4c134c3c7131111680e5cff1c77 | [] | no_license | suryanarayadev/clusil-intranet | 1c46d2508f37050cbb9bd96dad466b24f715d8c2 | cc3fd8d3f35c35e30f72cc501962ea4953ca1945 | refs/heads/master | 2021-01-17T21:40:51.154315 | 2015-09-18T21:04:53 | 2015-09-18T21:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | # coding=utf-8
from datetime import date
from django.forms import Form, ModelForm, TextInput, Textarea, HiddenInput, CharField, ModelChoiceField
from django.conf import settings
from .models import Meeting, Location
# Location create/edit form; the address renders as a multi-line textarea.
class LocationForm(ModelForm):
  class Meta:
    model = Location
    fields = ( 'name', 'address', )
    widgets = {
      'address' : Textarea(),
    }
# Location picker for the "modify location" wizard: one dropdown listing
# every saved Location.
class ListLocationsForm(Form):
  locations = ModelChoiceField(queryset=Location.objects.all())
# Meeting creation form; `when`/`time` use native HTML5 date/time inputs and
# the title is rendered read-only. `additional_message` is an optional extra
# text forwarded in the invitation.
class MeetingForm(ModelForm):
  additional_message = CharField(label='Message supplémentaire',widget=Textarea(attrs={'placeholder': "Message à transmettre dans l'inviation.",}),required=False)
  class Meta:
    model = Meeting
    fields = ( 'title', 'when', 'time', 'location', 'additional_message', )
    widgets = {
      'title' : TextInput(attrs={'readonly': 'readonly', }),
      'when' : TextInput(attrs={'type': 'date', }),
      'time' : TextInput(attrs={'type': 'time', }),
    }
# Meeting picker for the "modify meeting" wizard, ordered by date (oldest first).
class ListMeetingsForm(Form):
  meetings = ModelChoiceField(queryset=Meeting.objects.all().order_by('when'))
# Edit form for an existing meeting: same fields as creation, minus the
# extra invitation message.
class ModifyMeetingForm(ModelForm):
  class Meta:
    model = Meeting
    fields = ( 'title', 'when', 'time', 'location', )
    widgets = {
      'when' : TextInput(attrs={'type': 'date', }),
      'time' : TextInput(attrs={'type': 'time', }),
    }
| [
"pst@libre.lu"
] | pst@libre.lu |
def draw_line(count, symbol, offset_count=0):
    """Print one row: `count` symbols separated by single spaces, shifted
    right by `offset_count` leading spaces."""
    padding = ' ' * offset_count
    row = (f"{symbol} " * count).strip()
    print(f"{padding}{row}")
def draw_rhombus(n):
    """Print a rhombus of stars whose widest row holds n stars."""
    # Row widths grow 1..n then shrink n-1..1; the offset keeps rows centred.
    row_sizes = list(range(1, n + 1)) + list(range(n - 1, 0, -1))
    for size in row_sizes:
        draw_line(size, '*', n - size)
n = int(input())  # widest row of the rhombus has n stars
draw_rhombus(n) | [
"rimisark92@gmail.com"
] | rimisark92@gmail.com |
641c99b05541aae373077e10268e6323c51d3165 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_BestCycle_LSTM.py | fc40645070b6c9b7dc80e07988908a29182d3e88 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 158 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['BestCycle'] , ['LSTM'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
7c3a83c8ebac17237a4c5e636db2c2cc32126d39 | c552bc4a6369654f001cf8ddef66b3079ed26870 | /src/ui/widgets/elevation.py | 54384f1fd2849904de9acb444b30fc9c709dc1a4 | [
"MIT"
] | permissive | SqrtMinusOne/GeoTIFF-3d | a59258fc691365af6efea706bb157b1e60a79854 | cf4bda4b989f0c09fe91f676d4094feb75aa54e7 | refs/heads/master | 2020-05-18T13:55:10.068499 | 2019-10-08T03:52:42 | 2019-10-08T03:53:36 | 184,456,538 | 5 | 0 | MIT | 2019-05-17T15:14:29 | 2019-05-01T17:46:12 | Python | UTF-8 | Python | false | false | 5,017 | py | import sys
import numpy as np
from PyQt5.QtCore import QRect, QRectF, Qt, QLine, QPointF, QPoint
from PyQt5.QtGui import QLinearGradient, QPainter, QPen, QPolygonF
from PyQt5.QtWidgets import (QApplication, QGraphicsItem, QGraphicsScene,
QGraphicsView)
__all__ = ['ElevationGraphWidget']
def getRect(widget):
    """Rectangle for the gradient column: horizontally centred, 10 px
    top/bottom margins, width equal to a quarter of the widget height."""
    width, height = widget.width(), widget.height()
    rect_height = height - 20
    rect_width = int(height / 4)
    rect_x = int((width - rect_width) / 2)
    return QRect(rect_x, 10, rect_width, rect_height)
class ElevationSquare(QGraphicsItem):
    """A red-green gradient column annotated with altitude labels."""

    def __init__(self, start, end, levels, parent=None):
        super().__init__(parent)
        self.start, self.end = start, end
        self.levels = levels
        # Placeholder geometry; replaced with the real rect on first paint().
        self.rect = QRect(0, 0, 100, 100)

    def paint(self, painter: QPainter, option, widget=None):
        painter.setPen(QPen(Qt.black, 0))
        self.rect = getRect(widget)
        # Vertical gradient: red at the top, green at the bottom.
        gradient = QLinearGradient(self.rect.topLeft(), self.rect.bottomLeft())
        gradient.setColorAt(0, Qt.red)
        gradient.setColorAt(1, Qt.green)
        painter.setBrush(gradient)
        painter.drawRect(self.rect)
        metrics = painter.fontMetrics()
        for level in self.levels:
            text = str(int(level))
            w, h = metrics.width(text), metrics.height()
            # Map the altitude onto the rect height (start maps to the bottom
            # edge) and centre the label vertically on that point.
            y = self.rect.height() - (level - self.start) / (
                self.end -
                self.start) * self.rect.height() + self.rect.y() - h / 2
            # Labels sit 10 px to the left of the column, right-aligned.
            x = self.rect.x() - w - 10
            text_rect = QRectF(x, y, w, h)
            painter.drawText(text_rect, Qt.AlignRight, text)

    def boundingRect(self):
        # Rect expanded by 2 px on every side.
        adjust = 2
        return QRectF(self.rect.x() - adjust,
                      self.rect.y() - adjust,
                      self.rect.width() + adjust,
                      self.rect.height() + adjust)
class CameraTri(QGraphicsItem):
    """A triangle marker showing the camera's current level."""

    def __init__(self, start, end, pos, parent=None):
        super().__init__(parent)
        self.start, self.end = start, end
        # NOTE(review): this attribute shadows QGraphicsItem.pos(); the class
        # only uses the scalar value, but confirm no caller relies on pos().
        self.pos = pos
        self.line = QLine(0, 0, 100, 0)  # placeholder; recomputed in paint()

    def updatePos(self, pos):
        """Store the new camera position and request a repaint."""
        self.pos = pos
        self.update()

    def getPoint(self):
        """Map self.pos onto self.line, clamping to the line's endpoints."""
        if self.pos < self.start:
            return self.line.p1()
        elif self.pos > self.end:
            return self.line.p2()
        else:
            # Linear interpolation between the two endpoints.
            c = (self.pos - self.start) / (self.end - self.start)
            return self.line.p1() * (1 - c) + self.line.p2() * c

    def paint(self, painter: QPainter, option, widget=None):
        # Recompute the vertical guide line just right of the gradient column.
        rect = getRect(widget)
        delta = QPoint(5, 0)
        self.line = QLine(rect.bottomRight() + delta * 2,
                          rect.topRight() + delta * 2)
        point = self.getPoint()
        painter.setPen(QPen(Qt.black, 0))
        painter.setBrush(Qt.black)
        offset_h = QPointF(10, 0)
        offset_v = QPointF(0, 10)
        # Closed triangle with its apex at the current level.
        points = QPolygonF((
            QPointF(point),
            QPointF(point + offset_h - offset_v),
            QPointF(point + offset_h + offset_v),
            QPointF(point)
        ))
        painter.drawPolygon(points, 4)

    def boundingRect(self):
        # Line's bounding box, widened 20 px horizontally on each end.
        offset = QPointF(20, 0)
        return QRectF(self.line.p1() - offset, self.line.p2() + offset)
class ElevationGraphWidget(QGraphicsView):
    """A widget to show an elevation of camera relative to the
    given altitude"""

    def __init__(self, start, end, pos, width=240, height=240,
                 levels=None, parent=None):
        super().__init__(parent)
        self.start, self.end = start, end
        self.pos = pos
        # Default to five evenly spaced altitude labels across the range.
        self.levels = np.linspace(start, end, 5) if levels is None else levels
        self.resize(width, height)
        scene = QGraphicsScene(self)
        scene.setItemIndexMethod(QGraphicsScene.NoIndex)
        scene.setSceneRect(0, 0, self.width(), self.height())
        self.setScene(scene)
        self.setCacheMode(self.CacheBackground)
        self.setViewportUpdateMode(self.BoundingRectViewportUpdate)
        self.setRenderHint(QPainter.Antialiasing)
        self.setTransformationAnchor(self.AnchorUnderMouse)
        self.setSizeAdjustPolicy(self.AdjustToContents)
        self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        # Fixed, non-scrolling viewport.
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setMinimumSize(60, 60)
        self.initContent()

    def initContent(self):
        # Build the gradient column and the camera marker, add both to the scene.
        self.square = ElevationSquare(self.start, self.end, self.levels)
        self.tri = CameraTri(self.start, self.end, self.pos)
        self.scene().addItem(self.square)
        self.scene().addItem(self.tri)

    def updatePos(self, pos):
        """Update the camera position marker and repaint the scene."""
        self.tri.updatePos(int(pos))
        self.scene().update()
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = ElevationGraphWidget(0, 1000, 500)
widget.show()
app.exec_()
| [
"thexcloud@gmail.com"
] | thexcloud@gmail.com |
f5e424239ab2839fe0d0d067878f8928e02544b2 | 16050a733a926cbbfd7207fa8ce00db1a8fee81c | /apps/API_VK/command/commands/AdminCommands/Control.py | b60065962c18e8e53fb2bcbbef1a889226982424 | [] | no_license | thallkeer/xoma163site | bd87ed4924c244b666343ab30e66477adfad84fa | f86f61c060233028dced4ae48d44015c62625358 | refs/heads/master | 2021-05-17T14:27:33.785029 | 2020-03-19T16:32:57 | 2020-03-19T16:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from apps.API_VK.command.CommonCommand import CommonCommand
# ToDo: get chat by id or name
class Control(CommonCommand):
    """Admin-only command that relays an arbitrary message into any chat."""

    def __init__(self):
        names = ["управление", "сообщение"]
        help_text = "Управление - отправление сообщение в любую конфу"
        detail_help_text = "Управление (N,M) - N - chat_id, M - сообщение"
        super().__init__(names, help_text, detail_help_text, access='admin', args=2, int_args=[0])

    def start(self):
        # First positional argument selects the target chat.
        target_chat_id = self.vk_event.args[0]
        # Everything after the first token is the message body.
        text = self.vk_event.params_without_keys.split(' ', 1)[1]
        self.vk_bot.send_message(self.vk_bot.get_group_id(target_chat_id), text)
| [
"Xoma163rus@gmail.com"
] | Xoma163rus@gmail.com |
45aec375bad35993810ff1642d6699b3217130a7 | c18bfe1c2c78962ebcfb4296697324dbadbcca48 | /build/my_personal_robotic_companion/robot_model/collada_parser/catkin_generated/pkg.installspace.context.pc.py | 5ece18fad88f276b8a09448fef516eca9a4157db | [] | no_license | MIsmailKhan/SERO | e1ac693752c4201f987a13d3dff2ece69bb6e59d | da4956682741c14c2eb6c49ea83e9d76987ce1ff | refs/heads/master | 2021-01-17T05:26:01.160822 | 2017-06-28T17:30:44 | 2017-06-28T17:30:44 | 95,693,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by catkin from pkg.context.pc.in at configure time;
# edit the template, not this file. Values feed the package's .pc generation.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ismail/catkin_ws2/install/include;/usr/include".split(';') if "/home/ismail/catkin_ws2/install/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;urdf_parser_plugin".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcollada_parser".split(';') if "-lcollada_parser" != "" else []
PROJECT_NAME = "collada_parser"
PROJECT_SPACE_DIR = "/home/ismail/catkin_ws2/install"
PROJECT_VERSION = "1.11.7"
| [
"ismailkhan777@gmail.com"
] | ismailkhan777@gmail.com |
d5cb86c0a9193341d40321d7fcab8fc9e816be35 | 882de85f0a5e99320848ff2e140723888c075420 | /plugins/houdini/create/create_vbd_cache.py | eb836848ca0d4299d013214b2c636ed243fd265e | [
"MIT"
] | permissive | Panda-Luffy/reveries-config | 60fb9511f9c4ff9bb9f0c74e2a9d2c390684a033 | 09fb5be23816031e1407b02669049322fd6a8c16 | refs/heads/master | 2020-05-16T15:35:47.027217 | 2019-04-23T11:27:21 | 2019-04-23T11:27:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from avalon import houdini
class CreateVDBCache(houdini.Creator):
    """Creator for OpenVDB caches written out from a Geometry ROP."""

    label = "VDB Cache"
    family = "reveries.vdbcache"
    icon = "cloud"

    def __init__(self, *args, **kwargs):
        super(CreateVDBCache, self).__init__(*args, **kwargs)

        # Remove the active, we are checking the bypass flag of the nodes
        self.data.pop("active", None)

        # Set node type to create for output
        self.data["node_type"] = "geometry"

    def process(self):
        instance = super(CreateVDBCache, self).process()

        parms = {
            "sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name,
            "initsim": True,
        }
        if self.nodes:
            # Point the ROP at the first selected SOP node.
            parms["soppath"] = self.nodes[0].path()

        instance.setParms(parms)
| [
"davidlatwe@gmail.com"
] | davidlatwe@gmail.com |
dbb6d5cb1e195b116fb37bd4c544da4e49b82da1 | ea96ef74bc93762a0ed5d616a831259daf24992e | /knesset_data/dataservice/tests/base/test_exceptions.py | fb074ac802766e8d7a512d55097d6591e061ea20 | [] | no_license | alonisser/knesset-data-python | ca41675a31c6ea1a228fb43ad50b90dfa5c8d60b | 39aeb40edbb6c3b7af5967a90ac14bbf262fa6ad | refs/heads/master | 2021-01-22T20:44:28.734243 | 2017-07-02T08:38:53 | 2017-07-02T08:38:53 | 85,351,225 | 0 | 0 | null | 2017-03-17T20:16:18 | 2017-03-17T20:16:18 | null | UTF-8 | Python | false | false | 3,394 | py | import unittest
from datetime import datetime
from knesset_data.dataservice.committees import Committee, CommitteeMeeting
from knesset_data.dataservice.exceptions import KnessetDataServiceRequestException, KnessetDataServiceObjectException
from knesset_data.dataservice.mocks import MockMember
from knesset_data.utils.testutils import data_dependant_test
# Subclasses that force fast failures: a one-second timeout plus a deliberately
# invalid service method name, so requests error out quickly in the tests below.
class CommitteeWithVeryShortTimeoutAndInvalidService(Committee):
    DEFAULT_REQUEST_TIMEOUT_SECONDS = 1
    METHOD_NAME = "Invalid Method Name"
# Same idea for committee meetings; here the method name is plain gibberish.
class CommitteeMeetingWithVeryShortTimeoutAndInvalidService(CommitteeMeeting):
    DEFAULT_REQUEST_TIMEOUT_SECONDS = 1
    METHOD_NAME = "FOOBARBAZBAX"
class TestDataServiceRequestExceptions(unittest.TestCase):
    """Error-handling behaviour of the dataservice layer.

    The first two tests run against MockMember (no network); the last two are
    marked data-dependant because they exercise the live Knesset service.
    NOTE: the `except Exception, e:` syntax makes this module Python 2 only.
    """

    def test_member_exception(self):
        # get_page - raises an exception as soon as it's encountered
        exception = None
        try:
            list(MockMember.get_page())
        except Exception, e:
            exception = e
        self.assertEqual(exception.message, "member with exception on init")
        # get - raises an exception as soon as it's encountered
        exception = None
        try:
            MockMember.get(215)
        except Exception, e:
            exception = e
        self.assertEqual(exception.message, "member with exception on get")

    def test_member_skipped_exceptions(self):
        # get_page with skip_exceptions - yields exception objects on error
        # (good objects keep their id; failed ones surface as exception objects)
        self.assertEqual([o.message if isinstance(o, KnessetDataServiceObjectException) else o.id
                          for o in MockMember.get_page(skip_exceptions=True)],
                         [200, 201, 202, 'member with exception on init', 'member with exception on parse'])

    @data_dependant_test()
    def test_committee(self):
        # An invalid method name must surface as a KnessetDataServiceRequestException
        # carrying the method, service and URL that failed.
        exception = None
        try:
            CommitteeWithVeryShortTimeoutAndInvalidService.get(1)
        except KnessetDataServiceRequestException as e:
            exception = e
        self.assertIsInstance(exception, KnessetDataServiceRequestException)
        self.assertListEqual([
            exception.knesset_data_method_name,
            exception.knesset_data_service_name,
            exception.url,
            str(exception.message)
        ], [
            'Invalid Method Name',
            'committees',
            'http://online.knesset.gov.il/WsinternetSps/KnessetDataService/CommitteeScheduleData.svc/Invalid%20Method%20Name(1)',
            "('Connection aborted.', error(104, 'Connection reset by peer'))",
        ])

    @data_dependant_test()
    def test_committee_meeting(self):
        # Same contract for the (committee_id, from_date) meeting query.
        exception = None
        try:
            CommitteeMeetingWithVeryShortTimeoutAndInvalidService.get(1, datetime(2016, 1, 1))
        except KnessetDataServiceRequestException as e:
            exception = e
        self.assertIsInstance(exception, KnessetDataServiceRequestException)
        self.assertListEqual([
            exception.knesset_data_method_name,
            exception.knesset_data_service_name,
            exception.url,
            str(exception.message)
        ], [
            'FOOBARBAZBAX',
            'committees',
            'http://online.knesset.gov.il/WsinternetSps/KnessetDataService/CommitteeScheduleData.svc/FOOBARBAZBAX?CommitteeId=%271%27&FromDate=%272016-01-01T00%3A00%3A00%27',
            "('Connection aborted.', error(104, 'Connection reset by peer'))",
        ])
| [
"ori@uumpa.com"
] | ori@uumpa.com |
127c9acd8a9765018edf85f3ff4d9c51c329f2f6 | f0a0aa6a5fe0ded715bb65c78dbedd9b2ca24e2f | /100DaysOfPython/Day23/TurtleCrossingProject/car_manager.py | 41842e2f967a34cc57b00e5231eeb43ea972cf9f | [] | no_license | DeepanshuSarawagi/python | bb911dd8dfc2d567a2c51679bb10c3f37f11e500 | a7b4ea3a0b6f1f373df3220ca655e575ae401d09 | refs/heads/master | 2022-04-26T17:46:39.301757 | 2021-08-25T15:08:14 | 2021-08-25T15:08:14 | 228,613,467 | 1 | 0 | null | 2022-04-22T23:32:41 | 2019-12-17T12:38:01 | Python | UTF-8 | Python | false | false | 848 | py | from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 5
class CarManager:
def __init__(self):
self.all_cars = []
self.car_speed = STARTING_MOVE_DISTANCE
def create_car(self):
random_chance = random.randint(1, 6)
if random_chance == 1:
new_car = Turtle("square")
new_car.shapesize(stretch_wid=1, stretch_len=2)
new_car.penup()
new_car.color(random.choice(COLORS))
random_y = random.randint(-250, 250)
new_car.goto(300, random_y)
self.all_cars.append(new_car)
def move_cars(self):
for car in self.all_cars:
car.backward(self.car_speed)
def level_up(self):
self.car_speed += MOVE_INCREMENT
| [
"deepanshusarawagi@gmail.com"
] | deepanshusarawagi@gmail.com |
# Tuple literals: the commas, not parentheses, are what make the tuple.
score = 88, 95, 70, 100, 99
print(score)
print(type(score))

# A single trailing comma still builds a (one-element) tuple.
score = 88,
print(score)
print(type(score))

# Without the comma this is just a plain int.
score = 88
print(score)
print(type(score))

# tuple() ---> conversion function building a tuple from an iterable
# list()  ---> conversion function building a list from an iterable
| [
"nugeat23@gmail.com"
] | nugeat23@gmail.com |
class Student:
    """A student whose e-mail address is always derived from the current name."""

    def __init__(self, first, last):
        self.first = first
        self.last = last

    @property
    def email(self):
        # Recomputed on access, so renames are reflected automatically.
        return f"{self.first.lower()}.{self.last.lower()}@gmail.com"

    @property
    def fullname(self):
        return f"{self.first} {self.last}"

    @fullname.setter
    def fullname(self, name):
        # Split "First Last" into the two name parts.
        self.first, self.last = name.split(' ')

    @fullname.deleter
    def fullname(self):
        print('Delete Name!')
        self.first = None
        self.last = None
# Quick manual demo of the property / setter / deleter behaviour.
emp1 = Student('John', 'Rembo')
emp1.first = 'Jim'
emp1.fullname = 'James Bond'  # setter overwrites both name parts
print(emp1.fullname)
print(emp1.email)
del emp1.fullname  # deleter prints a notice and clears both parts
| [
"keys4words@gmail.com"
] | keys4words@gmail.com |
bb3f848cbd7e7206f778f4b3e89beca4f54cd461 | 67d8173a716da10a7350213d98938aae9f2115ce | /LeetCode/LC_PY_ANSWERS/random-pick-with-blacklist.py | 951cb20be9569d8fe879dea6da79554abcb45a75 | [
"MIT"
] | permissive | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 1,366 | py | # Time: ctor: O(b)
# pick: O(1)
# Space: O(b)
import random
class Solution(object):
    """Uniform random pick from [0, N) excluding `blacklist`.

    Every blacklisted value below N - len(blacklist) is remapped onto an
    unused "white" value at or above that bound, so pick() needs only one
    draw over a contiguous range plus an O(1) dict lookup.
    """

    def __init__(self, N, blacklist):
        """
        :type N: int
        :type blacklist: List[int]
        """
        self.__n = N - len(blacklist)
        spare_whites = iter(set(range(self.__n, N)) - set(blacklist))
        self.__lookup = {black: next(spare_whites)
                         for black in blacklist if black < self.__n}

    def pick(self):
        """
        :rtype: int
        """
        drawn = random.randint(0, self.__n - 1)
        return self.__lookup.get(drawn, drawn)
# Time: ctor: O(blogb)
# pick: O(logb)
# Space: O(b)
import random
class Solution2(object):
    """Blacklist-avoiding pick via binary search over the sorted blacklist.

    NOTE: the constructor sorts the caller's list in place.
    """

    def __init__(self, N, blacklist):
        """
        :type N: int
        :type blacklist: List[int]
        """
        self.__n = N - len(blacklist)
        blacklist.sort()
        self.__blacklist = blacklist

    def pick(self):
        """
        :rtype: int
        """
        drawn = random.randint(0, self.__n - 1)
        # Find how many blacklisted values fall at or below the answer:
        # the answer is drawn + (that count).
        lo, hi = 0, len(self.__blacklist) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if self.__blacklist[mid] > drawn + mid:
                hi = mid - 1
            else:
                lo = mid + 1
        return drawn + lo
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
24ac38582013f5f4e58677f7f4c5ec41d47a34df | bf7c60d6bb4c3f35dd3d4114a3457026c6f6b95c | /blog/forms.py | 38901f11dbe5f9bb27a734edade7db4214fec740 | [] | no_license | zerofuxor/ContentQ-CMS | 2a093de904b332134abf851bd22f1a70c47e87dc | db7154910d3776ba3daf90f97fccb1cc51e5bf94 | refs/heads/master | 2021-01-17T07:45:49.614678 | 2011-01-07T18:16:32 | 2011-01-07T18:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | # Copyright 2010 Jose Maria Zambrana Arze <contact@josezambrana.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from google.appengine.ext.db import djangoforms
from django import forms
from blog import models
from common import forms as common_forms
# Choices for a page-vs-post type selector.
# NOTE(review): not referenced in this module -- presumably consumed elsewhere; verify.
TYPE_CHOICES = (('page','Page'),('post','Post'))
# Create/edit form for a post category; system-managed fields are excluded.
class PostCategoryForm(common_forms.CategoryForm):
  class Meta:
    model = models.PostCategory
    exclude = ['uuid', 'slug', 'deleted_at']
# Create/edit form for a blog post.
class PostItemForm(common_forms.BaseContentForm):
  class Meta:
    model = models.PostItem
    exclude = ['uuid', 'slug', 'plain_description','created_at', 'updated_at', 'deleted_at']
  def __init__(self, *args, **kwargs):
    super(PostItemForm, self).__init__(*args, **kwargs)
    # Category dropdown is populated dynamically from existing categories.
    self.fields['category'].widget = forms.Select(choices=models.PostCategory.get_choices())
    # Fixed display order of the form fields.
    self.fields.keyOrder = ["name", "category", "status","description", "body", "tags", "meta_desc"]
| [
"contact@josezambrana.com"
] | contact@josezambrana.com |
15fb0a5f474eae9654c88030017361c3699aee95 | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /arcseventdata/applications/make-tobyfit-par.py | da6c3877893937b116b75b899abe6c2270c007ab | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 3,694 | py | #!/usr/bin/env python
import journal
info = journal.info('make-merlin-par')
def getpixelinfo(ARCSxml):
    """Read per-pixel detector geometry from an ARCS instrument XML file.

    Returns a tuple of flat (one entry per pixel) arrays:
    (dists, phis, psis, da1, da2), where da1 = width/distance and
    da2 = solidangle/da1 -- per the 06/30/2009 note below, the last two
    columns of the .par file are angles.
    """
    info.log('parsing acrs xml: %s' % ARCSxml)
    from instrument.nixml import parse_file
    instrument = parse_file(ARCSxml)
    info.log('getting detector axes')
    from arcseventdata.GetDetectorAxesInfo import getDetectorAxes
    detaxes = getDetectorAxes(instrument)
    # Axis sizes describe the detector hierarchy:
    # packs / tubes per pack / pixels per tube.
    npacks, ndetsperpack, npixelsperdet = [axis.size() for axis in detaxes]
    info.log('getting pixel radii and heights')
    from arcseventdata.getpixelsizes import getpixelsizes
    radii, heights = getpixelsizes(
        instrument, npacks, ndetsperpack, npixelsperdet)
    widths = radii*2.
    info.log('getting pixel L2, phis, psis')
    from arcseventdata import getinstrumentinfo
    ii = getinstrumentinfo(ARCSxml)
    dists = ii['dists']
    phis = ii['phis']
    psis = ii['psis']
    sas = ii['solidangles']
    from reduction.units import length, angle
    # .I strips the unit wrapper, leaving bare numeric arrays
    # (presumably in the units imported above -- TODO confirm).
    dists = dists.I
    phis = phis.I
    psis = psis.I
    sas = sas.I
    # 06/30/2009: was told that the last two columns are angles
    da1 = widths/dists
    da2 = sas/da1
    # Flatten everything to 1-D: one entry per pixel.
    dists.shape = phis.shape = psis.shape = widths.shape = heights.shape = -1,
    da1.shape = da2.shape = -1,
    #return dists, phis, psis, widths, heights
    return dists, phis, psis, da1, da2
def getpixelinfo_mergedpixels(ARCSxml, pixelresolution):
    """Like getpixelinfo, but merging groups of `pixelresolution`
    adjacent pixels along each tube into one effective pixel.

    `pixelresolution` must evenly divide the 128 pixels per tube.
    Returns (dists, phis, psis, da1, da2) as flat arrays.
    """
    npixels = 128
    assert npixels%pixelresolution==0
    from arcseventdata.combinepixels import combinepixels, geometricInfo, geometricInfo_MergedPixels
    from histogram import axis
    pixelaxis = axis('pixelID', range(0, 128, pixelresolution))
    info.log('merging pixels')
    phi_p, psi_p, dist_p, sa_p, dphi_p, dpsi_p = combinepixels(ARCSxml, pixelaxis, pixelresolution)
    positions, radii, heights = geometricInfo(ARCSxml)
    positions, radii, heights = geometricInfo_MergedPixels(positions, radii, heights, pixelresolution)
    widths = radii*2
    # .I strips the unit wrappers, leaving bare numeric arrays.
    phis = phi_p.I
    psis = psi_p.I
    dists = dist_p.I
    sas = sa_p.I
    # 06/30/2009: was told that the last two columns are angles
    da1 = widths/dists
    da2 = sas/da1
    dists.shape = phis.shape = psis.shape = widths.shape = heights.shape = -1,
    da1.shape = da2.shape = -1,
    #return dists, phis, psis, widths, heights
    return dists, phis, psis, da1, da2
def writePar(stream, dists, phis, psis, widths, heights):
    """Write pixel geometry to *stream* in .par format.

    The first line holds the pixel count; each following line holds
    five fixed-width (8.3f) columns: distance, phi, psi, width, height.
    All five input sequences must have the same length.
    """
    info.log('writing to par file')
    n = len(dists)
    assert n == len(phis) == len(psis) == len(widths) == len(heights)
    stream.write('%s\n' % n)
    for row in zip(dists, phis, psis, widths, heights):
        stream.write(''.join('%8.3f' % value for value in row) + '\n')
    return
from pyre.applications.Script import Script
class App(Script):
    """pyre application: convert an ARCS instrument XML description
    into a TobyFit-style .par file.

    Command-line options:
      -x  path to the instrument XML (default ARCS.xml)
      -o  output .par file path (default ARCS.par)
      -r  pixel-merging resolution; 1 means no merging
    """
    class Inventory(Script.Inventory):
        import pyre.inventory
        arcsxml = pyre.inventory.str('x', default='ARCS.xml')
        outfile = pyre.inventory.str('o', default='ARCS.par')
        resolution = pyre.inventory.int('r', default=1)
    def main(self):
        arcsxml = self.inventory.arcsxml
        outfile = self.inventory.outfile
        resolution = self.inventory.resolution
        if resolution == 1:
            dists, phis, psis, widths, heights = getpixelinfo(arcsxml)
        else:
            dists, phis, psis, widths, heights = getpixelinfo_mergedpixels(arcsxml, resolution)
        # NOTE(review): the helpers actually return (..., da1, da2), not
        # widths/heights -- the local names are stale, but the values are
        # what ends up in the last two .par columns by design.
        writePar(open(outfile, 'w'), dists, phis, psis, widths, heights)
        return
return
def main():
    """Entry point: activate journal logging and run the App."""
    info.activate()
    app = App('make-merlin-par')
    app.run()
    return
if __name__ == '__main__': main()
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
ffdd53afd9bef80915daae81253f58ffa97d52a2 | 1e168ced1a4bdb53967021e082b98027aea9d38a | /3.해커랭크/PythonString/TextWrap/hacker.py | 7c950f4d5a5d65b82f1504f4cf2b6c85f1ca3353 | [] | no_license | vvspearlvvs/CodingTest | 3ebf921308570ac11eb87e6660048ccfcaf90ce4 | fc61b71d955f73ef8710f792d008bc671614ef7a | refs/heads/main | 2023-07-13T15:57:11.312519 | 2021-08-25T02:15:28 | 2021-08-25T02:15:28 | 354,232,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | def wrap(string,max_width):
result=[]
for i in range(0,len(string),max_width):
result.append(string[i:i+max_width])
return '\n'.join(result)
# Demo: wrap the 26-character sample string into lines of 4 characters.
string="ABCDEFGHIJKLIMNOQRSTUVWXYZ"
max_width=4
r=wrap(string,max_width)
print(r)
'''
총길이26
0-3 4*1-1
4-7 4*2-1
8-11
12-15
16-19
20-23
24-27
24 25
'''
| [
"gg66477@gmail.com"
] | gg66477@gmail.com |
2c422b8ebd1397f4b9c53bebe2ededabcab3119d | 0c03fcf9b3bdb43473a740935c4bf028b40e5ec4 | /AegisServer/message.py | da4ac0a66dbaa4f373913828105715940060f455 | [] | no_license | Bowserinator/AegisServer2 | afe6b7fbd826e031a00aec94501db7f88a133b5f | 06fc64e54adcaf82ff9a62f1047f5aee5c07672a | refs/heads/master | 2021-01-01T03:56:38.310579 | 2016-05-24T22:21:19 | 2016-05-24T22:21:19 | 59,434,565 | 0 | 2 | null | 2016-10-28T23:47:40 | 2016-05-22T21:10:50 | Python | UTF-8 | Python | false | false | 1,083 | py |
#:IndigoTiger!Brad@botters/IndigoTiger MODE ##BWBellairs-bots -o+b Bowserinator *!*@unaffiliated/bowserinator
#MULTI MODE EXAMPLE on one user
#:IndigoTiger!Brad@botters/IndigoTiger MODE ##BWBellairs-bots +oooo RadioNeat IovoidBot wolfy1339 Andromeda-dev
#MULTI OP EXAMPLE
def phraseModeUser(ircmsg):
    """Parse a raw IRC MODE line and pair each affected user with its mode.

    Example input:
      ":nick!user@host MODE ##chan +oo Alice Bob"

    Returns [sender_prefix, pairs, channel], where for a single target
    `pairs` is [[target, list_of_modes]] (historic format kept for
    callers), and for multiple targets it is [[target, mode], ...].
    Returns None when the MODE change carries no user/mask arguments
    (e.g. channel-only modes like "+m") -- previously this raised
    IndexError on the missing third field.
    """
    message = ircmsg
    ircmsg = ircmsg.split(" MODE ")[1]
    fields = ircmsg.split(" ", 2)
    channel = fields[0]
    modes = fields[1]
    # Guard: channel-only mode changes have no third field at all.
    users = fields[2] if len(fields) > 2 else ""
    if len(users) == 0:
        return None
    # Expand the mode string, keeping only modes that take a user/mask
    # parameter, each tagged with the most recent +/- sign.
    currentMode = ""
    modes2 = []
    for ch in modes:
        if ch == "+" or ch == "-":
            currentMode = ch
            continue
        if ch in ('e', 'I', 'b', 'q', 'o', 'v'):  # modes with user parameters
            modes2.append(currentMode + ch)
    sender = message.split(" MODE ")[0]
    userList = users.split(" ")
    if len(userList) == 1:  # single target: keep the historic nested format
        return [sender, [[users, modes2]], channel]
    # zip pairs targets with modes positionally; unlike the old
    # list.index() lookup this is O(n) and correct for duplicate nicks.
    userA = [[user, mode] for user, mode in zip(userList, modes2)]
    return [sender, userA, channel]
| [
"bowserinator@gmail.com"
] | bowserinator@gmail.com |
ed2674f00f099d8225bcc2a8ea8f6830ef935e78 | fde186bd141ed055ba8ab915b2ad25355f8f3fb6 | /ABC/111/py/B.py | 73d5183e6546cf60e8fcf5a3b020f01075dad8ca | [] | no_license | Tsukumo3/Atcoder | 259ea6487ad25ba2d4bf96d3e1cf9be4a427d24e | 5f8d5cf4c0edee5f54b8e78bc14a62e23cab69cb | refs/heads/master | 2020-12-20T05:04:39.222657 | 2020-10-17T01:39:04 | 2020-10-17T01:39:04 | 235,969,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | n = input()
first = n[0]
length = len(n)
guess = int(''.join([first]*length))
if guess >= int(n):
print(guess)
else:
next = int(first)+1
guess = int(''.join([str(next)]*length))
print(guess)
| [
"53821328+Tsukumo3@users.noreply.github.com"
] | 53821328+Tsukumo3@users.noreply.github.com |
af7975f0ce7ddd2e1f93104e71e1a5e5505653ce | a2cdf6290ddbe897bff0778991bdbaff5adbf1e5 | /Stub REST API implementation/NLP_Analysis/nlpanalysis.py | fe5b0c063ed4ed6ae0dafaba1d996acc6cf4d87b | [] | no_license | BUEC500C1/news-analyzer-JimY233 | 90796cc09ae3565c0ad1724d90cb549161dee488 | 3dcb3dc6ae7c882751dc385a7b6e8615563ebaf9 | refs/heads/main | 2023-04-14T15:44:25.237727 | 2021-04-28T12:50:09 | 2021-04-28T12:50:09 | 337,237,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | """
NLP analysis
Procedure-based Api
"""
from flask import Flask
from flask_restful import Resource, Api
import json
app = Flask(__name__)
api = Api(app)
#data in json file for example
TEXT = {"TEXT_ID": "text_id",
"TEXT": "text",
"Sentiment": "semtiment",
"NLP": ["nlp1","nlp2","nlp3"]
}
'''
Events
Event_Convert: File is converted to text in English
Event_Process: NLP analysis request and process the analysis
Event_Update: File is updated and re-analyze again
'''
def convert_text():
"convert the input to text in English"
pass
def create_data():
'''
check if it is text and in English, if not, use convert_text()
if convert event not successful, log error message
return success or failure
'''
pass
def update_data(data,message):
'''
update the data with requirement and log the message
return success or failure
'''
pass
def delete_data(data):
'''
delete the data and log the message
return success or failure
'''
pass
class NLPanalysis(Resource):
def post(self): #create: upload file and create the data for this file
'''
if Event_upload:
create_data() #create data
'''
return {'Hello': 'world'}
def delete(self): #delete: delete the file and relating data
pass
def put(self): #update: update the data record
pass
def get(self): #read: read data json file and return information
pass
api.add_resource(NLPanalysis,'/')
if __name__ == '__main__':
app.run(debug = True)
| [
"noreply@github.com"
] | BUEC500C1.noreply@github.com |
1d6fb51b992dd0221609f01ea891b56d2ff09c56 | 7f52618136c8d9b9ba0ce8f89f3fcc90c4e6feb7 | /csa_new/csa_new/doctype/umpire_level/umpire_level.py | 1161b6944d2e90d3760fea509076f58023729916 | [
"MIT"
] | permissive | Jishnu70055/user_management | 7ade7f196f974ea0b3ddb220e3fca49665d9de3b | 82d3d2c85a62c7f1162633c164cb7d50e229d2fd | refs/heads/main | 2023-07-06T14:03:00.213723 | 2021-08-10T12:42:10 | 2021-08-10T12:42:10 | 394,649,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | # Copyright (c) 2021, sd and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Umpirelevel(Document):
    """Controller for the 'Umpire level' DocType.

    No custom server-side behaviour; everything is inherited from
    frappe's Document base class.
    """
    pass
| [
"jishnudq70055@gmail.com"
] | jishnudq70055@gmail.com |
1e77ea0f2585d378b5f441157e9e6ff618ea0b73 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/452/usersdata/302/104113/submittedfiles/avenida.py | 73284b82cb60a4041aaa2603048d546eb5cb7a7c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
# Read an m x n matrix of floats from the user, row by row.
m = int(input('Digite o número de linhas: '))
n = int(input('DIgite o número de colunas: '))
matriz = []
for i in range(m):
    linha = []
    for j in range(n):
        linha.append(float(input('Digite o elemento %d de %d: ' %((j+1),(i+1)))))
    matriz.append(linha)
print(matriz)
a = 0
b = 0  # NOTE(review): b and c are assigned but never used
c = 0
# NOTE(review): the loops skip the last row and column, and the indices
# look transposed -- matriz[j][i] uses the column counter j as a row
# index, which raises IndexError when m != n. Verify intended behaviour.
for i in range(m-1):
    for j in range(n-1):
        a = a + matriz[j][i]
print(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1f8869417410dde70157bbd60e97c6e6b31bd854 | 4e1aa596a601f684f1f46058fbd91b2040e69cf0 | /scrolltwo/scrolltwo/wsgi.py | 575fd8d9926fd0c07df66f7cef88dca8a5785793 | [] | no_license | dpuman/cs50sui | 584b99f2580ddc4fa68454a9aa00d2e2a675ce03 | a019178d5319a56eae7b3fc5855dfd3adf427f0a | refs/heads/master | 2023-01-31T20:01:55.770764 | 2020-12-15T18:00:14 | 2020-12-15T18:00:14 | 318,615,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for scrolltwo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrolltwo.settings')
# The module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) load.
application = get_wsgi_application()
| [
"dpumail.in@gmail.com"
] | dpumail.in@gmail.com |
94a16418a44c7c93ab1561beab6aeb8798bb1701 | d9e8b7d5c468b38cdf18cece9dff12ad1188a71b | /Models/project_Clothes/Cloth_proj/FirstApp/migrations/0001_initial.py | fc6e1d9c9644f1af71cd698e3db2147011f152cc | [] | no_license | Ruchika-Munde/Django_Task | f14e0497a4f8045a68dbe58bbd772abf606369d3 | 7fa549842a544527b9f78cbfcf52c26dde31463c | refs/heads/master | 2022-12-16T17:53:12.577323 | 2020-09-09T07:34:43 | 2020-09-09T07:34:43 | 294,036,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # Generated by Django 2.2.7 on 2019-11-28 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Clothes table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Clothes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'Catagory' is misspelled in the schema;
                # renaming it now would require a follow-up migration.
                ('Catagory', models.TextField()),
                ('Price', models.FloatField()),
                ('Pattern', models.TextField()),
            ],
        ),
    ]
| [
"ruchamunde@gmail.com"
] | ruchamunde@gmail.com |
380e0a119079ff1619313228729d7cc6057e2410 | eae2fb8556f62722e4467f4554747785aaf907be | /gan/plotting/epochs.py | 4f40f01ccdb5eb67a2beeb9b2d53392a31a34ccb | [] | no_license | aminnj/DY-GAN | a78fc9c4cb75e9f3238af735a92615c4b3586fe7 | a2a3f4e293b25ce2edcbdfd677270bc81ab14838 | refs/heads/master | 2020-05-24T23:09:36.553106 | 2018-10-17T22:13:11 | 2018-10-17T22:13:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,202 | py | import sys
sys.path.extend(["../../","../"])
import os
import numpy as np
from physicsfuncs import *
import glob
import ROOT as r
import plottery.plottery as ply
import plottery.utils as plu
def get_quantities(fname):
    """Load one epoch's generated events (.npy) and derive summary
    kinematic quantities via the physicsfuncs helpers.

    The epoch number is parsed from the trailing "_<epoch>.npy" part of
    the file name. Non-finite invariant masses are filtered out.
    """
    data = np.load(fname)
    masses = Minv(data)
    return {
        "epoch": int(fname.rsplit("_",1)[-1].split(".")[0]),
        "masses": masses[np.isfinite(masses)],
        "Z_pZ": Z_pZ(data),
        "Z_pT": Z_pT(data),
        "phis": get_phis(data),
        "dphis": get_dphis(data),
        "etas": get_etas(data),
        "detas": get_detas(data),
    }
# fnames = glob.glob("../progress/vtestadam/*npy")
# NOTE(review): this pattern has no wildcard (compare the commented line
# above, which uses "*npy"); as written it only matches a file literally
# named "npy" -- likely a typo, verify.
fnames = glob.glob("../progress/vdecaynoise/npy")
data = np.load("../data_xyz.npy")
points_mz = []
points_zpt = []
points_zpz = []
points_phis = []
points_dphis = []
points_etas = []
points_detas = []
# Process epoch files in epoch order (epoch parsed from the file name).
fnames = sorted(fnames, key=lambda x:int(x.rsplit("_",1)[-1].split(".")[0]))
for fname in fnames:
    quantities = get_quantities(fname)
    # Skip epochs whose mass distribution has non-finite moments.
    if not np.isfinite(quantities["masses"].mean()): continue
    if not np.isfinite(quantities["masses"].std()): continue
    # Each point is [epoch, mean, std] of the quantity for that epoch.
    points_mz.append([quantities["epoch"], quantities["masses"].mean(), quantities["masses"].std()])
    points_zpt.append([quantities["epoch"], quantities["Z_pT"].mean(), quantities["Z_pT"].std()])
    points_zpz.append([quantities["epoch"], quantities["Z_pZ"].mean(), quantities["Z_pZ"].std()])
    points_phis.append([quantities["epoch"], quantities["phis"].mean(), quantities["phis"].std()])
    points_dphis.append([quantities["epoch"], quantities["dphis"].mean(), quantities["dphis"].std()])
    points_etas.append([quantities["epoch"], quantities["etas"].mean(), quantities["etas"].std()])
    points_detas.append([quantities["epoch"], quantities["detas"].mean(), quantities["detas"].std()])
# Reference ("truth") distributions from the training data: column 0 is
# the invariant mass; columns 1-8 are the event features fed to the
# kinematic helpers (presumably two lepton four-vectors -- confirm).
mZs = data[:,0]
zpz = Z_pZ(data[:,range(1,9)])
zpt = Z_pT(data[:,range(1,9)])
phis = get_phis(data[:,range(1,9)])
dphis = get_dphis(data[:,range(1,9)])
etas = get_etas(data[:,range(1,9)])
detas = get_detas(data[:,range(1,9)])
# zcent = 90.9925
# zstd = 5.2383
def moving_average(a, n=3):
    """Length-n simple moving average of *a*.

    Output element i is mean(a[i:i+n]); the result has
    len(a) - n + 1 entries.
    """
    running = np.cumsum(a, dtype=float)
    running[n:] = running[n:] - running[:-n]
    return running[n - 1:] / n
def make_plot(points, truth, label_truth, label_pred, fname):
    """Plot per-epoch mean +/- std of a generated quantity against a
    horizontal band showing the truth distribution's mean +/- std.

    `points` rows are [epoch, mean, std]; with smooth=True the mean and
    std curves are moving-averaged over a 15-epoch window (and the x
    values trimmed accordingly). Output image is written to `fname`.
    """
    truth_cent = truth.mean()
    truth_std = truth.std()
    points = sorted(points)
    smooth = True
    window = 15
    points = np.array(points)
    if not smooth:
        xvals = points[:,0]
        yvals = points[:,1]
        ydown = points[:,2]
        yup = points[:,2]
    else:
        # Drop the first window-1 epochs so x and the smoothed y align.
        xvals = points[:,0][window-1:]
        yvals = moving_average(points[:,1],n=window)
        ydown = moving_average(points[:,2],n=window)
        yup = moving_average(points[:,2],n=window)
    # hpred = r.TH1F("hpred",100,truth.min(),truth.max())
    # htruth = r.TH1F("htruth",100,truth.min(),truth.max())
    # fill_fast(hpred, yvals)
    # fill_fast(htruth, truth)
    ply.plot_graph(
        [
            # Truth band spanning the whole epoch range, then the
            # per-epoch generated curve with its std as error band.
            ([0.,max(xvals)],[truth_cent,truth_cent],[truth_std,truth_std],[truth_std,truth_std]),
            (xvals,yvals,ydown,yup),
        ],
        colors = [r.kAzure+2,r.kRed-2],
        legend_labels = [label_truth, label_pred],
        options = {
            "legend_alignment": "bottom right",
            "legend_scalex": 0.7,
            "xaxis_label": "epoch",
            "yaxis_label": label_pred,
            "output_name": fname,
            "output_ic": True,
        }
    )
# One plot per tracked kinematic quantity (labels use ROOT TLatex syntax).
make_plot(points_mz, mZs, "m_{Z}", "#mu(inv. mass)", "plots/epoch_mz.png")
make_plot(points_zpt, zpt, "p_{T}^{Z}", "p_{T}^{Z} generated", "plots/epoch_zpt.png")
make_plot(points_zpz, zpz, "p_{z}^{Z}", "p_{z}^{Z} generated", "plots/epoch_zpz.png")
make_plot(points_phis, phis, "#phi(lep)", "#phi(lep) generated", "plots/epoch_phis.png")
make_plot(points_dphis, dphis, "#delta#phi(l1,l2)", "#delta#phi(l1,l2) generated", "plots/epoch_dphis.png")
make_plot(points_etas, etas, "#eta(lep)", "#eta(lep) generated", "plots/epoch_etas.png")
make_plot(points_detas, detas, "#delta#eta(l1,l2)", "#delta#eta(l1,l2) generated", "plots/epoch_detas.png")
| [
"amin.nj@gmail.com"
] | amin.nj@gmail.com |
2cc0e7cd02d52e631303ca9340ee8a22a5c7bcfe | 44722fb1541645937f17e8e920f4954ff99cc046 | /src/gamesbyexample/rainbow.py | 553517b98e511d9caf24f98f17260bc85c482dfa | [] | no_license | asweigart/gamesbyexample | a065d21be6c2e05a4c17643986b667efae0bc6de | 222bfc3b15ade1cf3bde158ba72a8b7a969ccc5a | refs/heads/main | 2023-07-16T12:12:58.541597 | 2021-09-01T21:24:35 | 2021-09-01T21:24:35 | 343,331,493 | 89 | 10 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | """Rainbow, by Al Sweigart al@inventwithpython.com
Shows a simple rainbow animation. Press Ctrl-C to stop.
This code is available at https://nostarch.com/big-book-small-python-programming
Tags: tiny, artistic, bext, beginner, scrolling"""
__version__ = 0
import time, sys
try:
import bext
except ImportError:
print('This program requires the bext module, which you')
print('can install by following the instructions at')
print('https://pypi.org/project/Bext/')
sys.exit()
print('Rainbow, by Al Sweigart al@inventwithpython.com')
print('Press Ctrl-C to stop.')
time.sleep(3)
indent = 0 # How many spaces to indent.
indentIncreasing = True # Whether the indentation is increasing or not.
try:
    while True: # Main program loop.
        print(' ' * indent, end='')
        # Print six two-character bands, one per rainbow color.
        # (bext.fg sets the terminal foreground color for later prints.)
        bext.fg('red')
        print('##', end='')
        bext.fg('yellow')
        print('##', end='')
        bext.fg('green')
        print('##', end='')
        bext.fg('blue')
        print('##', end='')
        bext.fg('cyan')
        print('##', end='')
        bext.fg('purple')
        print('##')
        # Zig-zag: grow the indent to 60 columns, then shrink back to 0.
        if indentIncreasing:
            # Increase the number of spaces:
            indent = indent + 1
            if indent == 60: # (!) Change this to 10 or 30.
                # Change direction:
                indentIncreasing = False
        else:
            # Decrease the number of spaces:
            indent = indent - 1
            if indent == 0:
                # Change direction:
                indentIncreasing = True
        time.sleep(0.02) # Add a slight pause.
except KeyboardInterrupt:
    sys.exit() # When Ctrl-C is pressed, end the program.
| [
"asweigart@gmail.com"
] | asweigart@gmail.com |
1312438de18b6fd9bacc2273fba281dfd62f8874 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /adafruit-circuitpython-bundle-py-20210402/lib/adafruit_ble_eddystone/uid.py | fe5807d34308cdf0e9866a52eadba92ba46edf29 | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # SPDX-FileCopyrightText: 2020 Scott Shawcroft for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_eddystone.uid`
================================================================================
Static Eddystone UID advertisement. Documented by Google here:
https://github.com/google/eddystone/tree/master/eddystone-uid
"""
from . import EddystoneAdvertisement, EddystoneFrameStruct, EddystoneFrameBytes
__version__ = "1.0.5"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Eddystone.git"
class EddystoneUID(EddystoneAdvertisement):  # pylint: disable=too-few-public-methods
    """Static Eddystone unique identifier.

    :param bytes instance_id: instance component of the id. 6 bytes long
    :param bytes namespace_id: namespace component of the id. 10 bytes long
    :param int tx_power: TX power at the beacon

    (Doc fix: the original docstring had the two lengths swapped; the
    frame layout below defines namespace=10 bytes, instance=6 bytes.)
    """

    # Matches Eddystone service data (16-bit UUID 0xAAFE) with frame type 0x00 (UID).
    match_prefixes = (b"\x03\xaa\xfe", b"\x16\xaa\xfe\x00")
    frame_type = b"\x00"

    tx_power = EddystoneFrameStruct("<B", offset=0)
    """TX power at the beacon in dBm"""

    namespace_id = EddystoneFrameBytes(length=10, offset=1)
    """10 byte namespace id"""

    instance_id = EddystoneFrameBytes(length=6, offset=11)
    """6 byte instance id"""

    # Two reserved bytes at the end of the 20-byte UID frame.
    reserved = EddystoneFrameBytes(length=2, offset=17)

    def __init__(self, instance_id, *, namespace_id=b"CircuitPy!", tx_power=0):
        super().__init__(minimum_size=20)
        # Fields can only be written when the underlying buffer is
        # mutable (constructing an advertisement, not parsing one).
        if self.mutable:
            self.tx_power = tx_power
            self.namespace_id = namespace_id
            self.instance_id = instance_id
"55570902+apalileo@users.noreply.github.com"
] | 55570902+apalileo@users.noreply.github.com |
131eac026aadd6a26cf792f42e57ceddf82a57cb | 3cb7dd2da84141168aee63a76b11eb0d9f75e6aa | /pretrained_tfmodels.py | 9c77d88842d235076ceb07d643ace6e7a0331104 | [] | no_license | OlgaBelitskaya/kaggle_tfpractice | 8428935d2736c15ea1b56d482ed04a66d01537e6 | ffcee547ea7817ffc5c81eb4817f03a4915f9009 | refs/heads/main | 2023-03-27T07:59:51.459085 | 2021-03-25T16:28:05 | 2021-03-25T16:28:05 | 308,240,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,837 | py | # -*- coding: utf-8 -*-
"""pretrained-tfmodels.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1auXjQuWrUv42OIACAXzYaSZb316dSI-t
"""
# Commented out IPython magic to ensure Python compatibility.
from IPython.display import display,HTML
def dhtml(st):
    """Render *st* as a styled HTML header in the notebook output.

    Clicking any such header switches all of them to the 'Ewert' font
    and recolors them via the embedded setStyle() JavaScript.
    """
    display(HTML("""<style>
@import url('https://fonts.googleapis.com/css?family=Roboto|Ewert&effect=3d');
</style><p class='font-effect-3d' onclick='setStyle(this,"#00ff66")'
style='font-family:Roboto; font-size:25px; color:#ff355e;'>
# %s</p>"""%st+"""<script>
function setStyle(element,c) {
      var docs=document.getElementsByClassName('font-effect-3d');
      for (var i=0; i<docs.length; i++) {
          docs[i].style='font-family:Ewert; font-size:22px;';
          docs[i].style.color=c;}; };
</script>"""))
dhtml('Code Modules & Helpful Functions')
import numpy as np,pylab as pl,pandas as pd
import h5py,tensorflow as tf
import tensorflow_hub as th
def premodel(pixels,dense,mh,labels):
    """Build and compile a transfer-learning image classifier.

    A fine-tunable TF-Hub feature extractor (handle `mh`) is followed by
    a `dense`-unit ReLU layer, 50% dropout, and a `labels`-way softmax.
    Inputs are (pixels, pixels, 3) images; targets are integer class ids
    (sparse categorical crossentropy). Prints the model summary.
    """
    model=tf.keras.Sequential([
        tf.keras.layers.Input((pixels,pixels,3),
                              name='input'),
        th.KerasLayer(mh,trainable=True),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(dense,activation='relu'),
        tf.keras.layers.Dropout(rate=.5),
        tf.keras.layers.Dense(labels,activation='softmax')])
    model.compile(optimizer='adam',metrics=['accuracy'],
                  loss='sparse_categorical_crossentropy')
    display(model.summary())
    return model
# Checkpoint file shared by all training runs below.
fw='weights.best.hdf5'
def cb(fw):
    """Standard callback set: checkpoint the best val_loss weights to
    `fw`, stop after 20 stagnant epochs, and multiply the learning rate
    by 0.8 after 5 stagnant epochs."""
    early_stopping=tf.keras.callbacks\
    .EarlyStopping(monitor='val_loss',patience=20,verbose=2)
    checkpointer=tf.keras.callbacks\
    .ModelCheckpoint(filepath=fw,save_best_only=True,verbose=2)
    lr_reduction=tf.keras.callbacks\
    .ReduceLROnPlateau(monitor='val_loss',verbose=2,
                       patience=5,factor=.8)
    return [checkpointer,early_stopping,lr_reduction]
def display_resize(x_train,x_valid,x_test,
                   y_valid,pixels):
    """Resize the three image sets to (pixels, pixels) and show one
    validation example as a visual sanity check.

    Returns [x_train, x_valid, x_test] resized.
    """
    x_train=tf.image.resize(x_train,[pixels,pixels])
    x_valid=tf.image.resize(x_valid,[pixels,pixels])
    x_test=tf.image.resize(x_test,[pixels,pixels])
    img=x_valid[1]
    lbl='One example of resized images \nlabel: '+\
    str(y_valid[1][0])+'\nshape: '+str(img.shape)
    pl.imshow(img); pl.title(lbl)
    return [x_train,x_valid,x_test]
dhtml('Data Loading & Preprocessing')
# Handwritten-letters dataset: HDF5 file with color images and labels.
fpath='../input/classification-of-handwritten-letters/'
f='LetterColorImages_123.h5'
f=h5py.File(fpath+f,'r')
keys=list(f.keys()); print(keys)
# Scale pixels to [0,1]; shift labels down by 1 to be 0-based.
x=np.array(f[keys[1]],dtype='float32')/255
y=np.array(f[keys[2]],dtype='int32')\
.reshape(-1,1)-1
# Fixed-seed shuffle, then a 10% test / 10% valid / 80% train split.
N=len(y); n=int(.1*N)
shuffle_ids=np.arange(N)
np.random.RandomState(23).shuffle(shuffle_ids)
x,y=x[shuffle_ids],y[shuffle_ids]
x_test,x_valid,x_train=x[:n],x[n:2*n],x[2*n:]
y_test,y_valid,y_train=y[:n],y[n:2*n],y[2*n:]
x_valid.shape,y_valid.shape
dhtml('Pre-Trained Saved Models')
# Model #1: mobilenet_v2_050_96 feature vector, 96x96 inputs.
[handle_base,pixels]=["mobilenet_v2_050_96",96]
dhtml('#1 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train1,x_valid1,x_test1]=\
display_resize(x_train,x_valid,x_test,
               y_valid,pixels)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
# Restore the best (lowest val_loss) checkpoint before scoring.
model.load_weights(fw)
model.evaluate(x_test1,y_test)
# Model #2: mobilenet_v2_075_96 -- same 96x96 inputs, reused as-is.
[handle_base,pixels]=["mobilenet_v2_075_96",96]
dhtml('#2 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
# Model #3: mobilenet_v1_100_128 -- images re-resized to 128x128.
[handle_base,pixels]=["mobilenet_v1_100_128",128]
dhtml('#3 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train1,x_valid1,x_test1]=\
display_resize(x_train,x_valid,x_test,
               y_valid,pixels)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
# Model #4: mobilenet_v2_050_128 -- reuses the 128x128 resized sets.
[handle_base,pixels]=["mobilenet_v2_050_128",128]
dhtml('#4 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
"safuolga@gmail.com"
] | safuolga@gmail.com |
7824041c43a79311186c6802442e4a2d26292730 | fb00b570251ba52df467e4cc030a30e778f8a970 | /Atividade 02 - semana 09/questão1_semana9_atividade02_runcodes.py | 6893a0b29843828af8d9941aa6aed51063808c08 | [] | no_license | SirLeonardoFerreira/Atividades-ifpi | 7379f9df4640fd1ee3623d80e4341f495e855895 | e366ee3f801dc9a1876c7399a2eefd37a03d0a55 | refs/heads/master | 2023-01-05T04:03:30.774277 | 2020-11-02T00:56:10 | 2020-11-02T00:56:10 | 287,967,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from random import randint, seed
seed()
def gerar_matriz(linhas, colunas):
    """Read a linhas x colunas matrix of ints from stdin, row by row."""
    matriz = []
    for l in range(linhas):
        linha = []
        for c in range(colunas):
            linha.append(int(input()))
        matriz.append(linha)
    return matriz
def main():
    """Read an n x n int matrix and print the (row, col) position of its
    largest element, then of its smallest element."""
    numero = int(input())
    resultado_matriz = gerar_matriz(numero, numero)
    aux_coluna = 0
    aux_numero = 0
    for num_linha in range(len(resultado_matriz)):
        for num_coluna in resultado_matriz[num_linha]:
            # aux_coluna advances the .index() search start, apparently
            # to disambiguate duplicate values within a row.
            # NOTE(review): aux_coluna is only reset inside the
            # first-element branch, so from the second row onward the
            # start can exceed the row length and .index() raises
            # ValueError. Rewriting with enumerate() would fix this.
            indice_coluna = resultado_matriz[num_linha].index(num_coluna, aux_coluna)
            if aux_numero == 0:
                # First element initializes both extremes.
                num_maior = num_menor = num_coluna
                aux_coluna += 1
                aux_numero += 1
                tupla_maior_num = [num_linha, indice_coluna]
                tupla_menor_num = [num_linha, indice_coluna]
                if aux_coluna == numero - 1:
                    aux_coluna = 0
            else:
                if num_coluna > num_maior:
                    num_maior = num_coluna
                    tupla_maior_num = [num_linha, indice_coluna]
                elif num_coluna < num_menor:
                    num_menor = num_coluna
                    tupla_menor_num = [num_linha, indice_coluna]
                aux_coluna += 1
    print(f'{tuple(tupla_maior_num)}')
    print(f'{tuple(tupla_menor_num)}')
if __name__ == '__main__':
    main()
| [
"lleoalves02@gmail.com"
] | lleoalves02@gmail.com |
2a26ace393d5196c245e912229e343c128df3fb8 | 98d61512fdf7f8426d4634a86edd25669944ab9e | /algorithms/DailyTemperatures/solution.py | 2b030a3ca012c45739680ce2a4dc8556e3ca46c6 | [] | no_license | P-ppc/leetcode | 145102804320c6283fa653fc4a7ae89bf745b2fb | 0d90db3f0ca02743ee7d5e959ac7c83cdb435b92 | refs/heads/master | 2021-07-12T02:49:15.369119 | 2018-11-13T05:34:51 | 2018-11-24T12:34:07 | 132,237,265 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | class Solution(object):
def dailyTemperatures(self, temperatures):
"""
:type temperatures: List[int]
:rtype: List[int]
"""
length = len(temperatures)
res = [0] * length
stack = []
for i in xrange(length):
while len(stack) > 0 and stack[-1]['value'] < temperatures[i]:
e = stack.pop()
res[e['index']] = i - e['index']
stack.append({ 'index': i, 'value': temperatures[i] })
return res | [
"ppc-user@foxmail.com"
] | ppc-user@foxmail.com |
86fdb2307fbaf9aa02aa603604f2b03812fbd954 | bd7887ed09185178331b8579a0f01690241f0daf | /Chapter-05/classification-labeledpoint.py | eba3916dbd3ada6b244bfa4e57ef7e19505a5ba8 | [] | no_license | nanfengpo/MachineLearningSpark | a9622ef355e10f3697462eec5168078d01e689d7 | de68154bd08908dc91ca9776c21dc2c6e441d242 | refs/heads/master | 2021-01-21T15:13:54.243853 | 2016-09-29T03:46:43 | 2016-09-29T03:46:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,225 | py | # coding=utf-8
from pyspark import SparkContext, SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.classification import SVMWithSGD
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.feature import StandardScaler
conf = SparkConf().setAppName("Classification-LabeledPoint").setMaster("local[2]").set("spark.executor.memory", "5g")
sc = SparkContext(conf= conf)
#get data source
raw_data = sc.textFile("/user-program/python/MachineLearningSpark/Data/train-noheader.tsv")
records = raw_data.map(lambda line : line.split("\t"))
records_first_data = records.first()
print("the first data of records :")
print(records_first_data)
print("the number of records fields :")
print(len(records_first_data))
#get data feature
def labeled_point(r):
    """Convert one raw record (list of TSV fields) to a LabeledPoint.

    Strips quote characters, takes the last field as the label and
    fields 4..n-2 as features, mapping missing values ('?') to 0.
    NOTE: Python 2 code -- relies on map() returning an indexable list.
    """
    trimmed = map(lambda l : l.replace('\"', ' '), r)
    label = int(trimmed[len(trimmed)-1])
    features = trimmed[4 : len(trimmed)-1]
    features = map(lambda f : f.replace('?', '0'), features)
    for i in range(0, len(features)):
        features[i] = float(features[i])
    return LabeledPoint(label, Vectors.dense(features))
data = records.map(lambda r : labeled_point(r))
num_data = data.count()
print("the number of data :")
print(num_data)
def labeled_point_nb(r):
    """Like labeled_point, but additionally clamps negative feature
    values to 0, since NaiveBayes requires non-negative features.

    NOTE: Python 2 code -- relies on map() returning an indexable list.
    """
    trimmed = map(lambda l : l.replace('\"', ' '), r)
    label = int(trimmed[len(trimmed)-1])
    features = trimmed[4 : len(trimmed)-1]
    features = map(lambda f: f.replace('?', '0'), features)
    for i in range(0, len(features)):
        features[i] = float(features[i])
        if features[i] < 0.0:
            features[i] = 0.0
    return LabeledPoint(label, Vectors.dense(features))
nb_data = records.map(lambda r : labeled_point_nb(r))
print("the first data of nb data and the count of nb data:")
print(nb_data.first())
#start train model
num_iterations = 10
# NOTE(review): max_tree_depth is defined but never passed to
# trainClassifier below, so the decision tree uses library defaults.
max_tree_depth = 5
lr_model = LogisticRegressionWithLBFGS().train(data, num_iterations)
print("logistic regression model :")
print(lr_model)
svm_model = SVMWithSGD().train(data, num_iterations)
print("svm model :")
print(svm_model)
# Naive Bayes trains on the clamped (non-negative) feature set.
nb_model = NaiveBayes().train(nb_data)
print("naive bayes model :")
print(nb_model)
# 2 classes, empty categorical-features map (all features continuous).
dt_model = DecisionTree().trainClassifier(data, 2, {})
print("decision tree model :")
print(dt_model)
#start predict
data_point = data.first()
lr_prediction = lr_model.predict(data_point.features)
print("logistic model prediction :" + str(lr_prediction))
print("the true label :" + str(data_point.label))
#analyze data
vectors = data.map(lambda lp : lp.features)
matrix = RowMatrix(vectors)
matrix_summary = matrix.computeColumnSummaryStatistics()
print("the col mean of matrix :")
print(matrix_summary.mean())
print("the col min of matrix :")
print(matrix_summary.min())
print("the col max of matrix :")
print(matrix_summary.max())
print("the col variance of matrix :")
print(matrix_summary.variance())
print("the col num non zero of matrix :")
print(matrix_summary.numNonzeros())
#transform data from data to standard scalar
# Standardize features (zero mean, unit variance), re-attach labels,
# and retrain logistic regression on the scaled data.
scaler = StandardScaler(withMean = True, withStd = True).fit(vectors)
labels = data.map(lambda lp : lp.label)
features_transformed = scaler.transform(vectors)
scaled_data = (labels.zip(features_transformed).map(lambda p : LabeledPoint(p[0], p[1])))
print("transformation before :")
print(data.first().features)
print("transformation after :")
print(scaled_data.first().features)
#train logistic regression use scaled data
lr_model_scaled = LogisticRegressionWithLBFGS().train(scaled_data, num_iterations)
print("logistic regression model use scaled data :")
print(lr_model_scaled)
# def total_correct_scaled(sd):
# if lr_model_scaled.predict(sd.features) == sd.label:
# return 1
# else:
# return 0
# lr_total_correct_scaled = scaled_data.map(lambda sd : total_correct_scaled(sd)).sum()
# print(lr_total_correct_scaled)
# lr_accuracy_scaled = float(lr_total_correct_scaled)/float(num_data)
# print("logistic regression accuracy scaled :")
# print(lr_accuracy_scaled) #the memory is enough
sc.stop() | [
"lovejing0306@gmail.com"
] | lovejing0306@gmail.com |
663cfd01044341ba3397f1eb66fd1007ef15ba9a | a7b07e14f58008e4c9567a9ae67429cedf00e1dc | /docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_formula_stddev.py | 77fe0829a2dbee849bd20fa490be0d4d15af7329 | [
"Apache-2.0"
] | permissive | dmontagner/healthbot-py-client | 3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9 | 0952e0a9e7ed63c9fe84879f40407c3327735252 | refs/heads/master | 2020-08-03T12:16:38.428848 | 2019-09-30T01:57:24 | 2019-09-30T01:57:24 | 211,750,200 | 0 | 0 | Apache-2.0 | 2019-09-30T01:17:48 | 2019-09-30T01:17:47 | null | UTF-8 | Python | false | false | 4,780 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFormulaStddev(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python type of each model attribute.
    swagger_types = {
        'field_name': 'str',
        'time_range': 'str'
    }
    # Attribute name -> key used in the JSON wire format.
    attribute_map = {
        'field_name': 'field-name',
        'time_range': 'time-range'
    }
    def __init__(self, field_name=None, time_range=None):  # noqa: E501
        """RuleSchemaFormulaStddev - a model defined in Swagger"""  # noqa: E501
        self._field_name = None
        self._time_range = None
        self.discriminator = None
        # Assign through the properties so the setters' validation runs.
        self.field_name = field_name
        self.time_range = time_range
    @property
    def field_name(self):
        """Gets the field_name of this RuleSchemaFormulaStddev.  # noqa: E501
        Field name on which standard deviation operation needs to be performed  # noqa: E501
        :return: The field_name of this RuleSchemaFormulaStddev.  # noqa: E501
        :rtype: str
        """
        return self._field_name
    @field_name.setter
    def field_name(self, field_name):
        """Sets the field_name of this RuleSchemaFormulaStddev.
        Field name on which standard deviation operation needs to be performed  # noqa: E501
        :param field_name: The field_name of this RuleSchemaFormulaStddev.  # noqa: E501
        :type: str
        """
        # Mandatory field: swagger schema marks field-name as required.
        if field_name is None:
            raise ValueError("Invalid value for `field_name`, must not be `None`")  # noqa: E501
        self._field_name = field_name
    @property
    def time_range(self):
        """Gets the time_range of this RuleSchemaFormulaStddev.  # noqa: E501
        How much back in time should we look for data. Specify positive integer followed by s/m/h/d/w/y representing seconds/minutes/hours/days/weeks/years. Eg: 2s  # noqa: E501
        :return: The time_range of this RuleSchemaFormulaStddev.  # noqa: E501
        :rtype: str
        """
        return self._time_range
    @time_range.setter
    def time_range(self, time_range):
        """Sets the time_range of this RuleSchemaFormulaStddev.
        How much back in time should we look for data. Specify positive integer followed by s/m/h/d/w/y representing seconds/minutes/hours/days/weeks/years. Eg: 2s  # noqa: E501
        :param time_range: The time_range of this RuleSchemaFormulaStddev.  # noqa: E501
        :type: str
        """
        # Mandatory field, and must look like e.g. "2s", "10m", "1y".
        if time_range is None:
            raise ValueError("Invalid value for `time_range`, must not be `None`")  # noqa: E501
        if time_range is not None and not re.search('^[1-9][0-9]*[smhdwy]$', time_range):  # noqa: E501
            raise ValueError("Invalid value for `time_range`, must be a follow pattern or equal to `/^[1-9][0-9]*[smhdwy]$/`")  # noqa: E501
        self._time_range = time_range
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RuleSchemaFormulaStddev):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"nitinkr@juniper.net"
] | nitinkr@juniper.net |
f08cb6551ae9483477d79afa14b9d3a1152bdb97 | 3a512b1ef962a77cc358747e51a565b0311fb5d3 | /tests/run_tests.py | c2e2de73bcb479d957f4067c2e61158b279dcb44 | [] | no_license | gokererdogan/rllib | 2f19ce8bcb011f213e66b61e5fd3ab3d68a8fc2a | 3052a9c6c95d3c8d5dc833bff0d8a8a01d8f360a | refs/heads/master | 2021-01-09T20:26:23.181937 | 2016-10-09T00:16:18 | 2016-10-09T00:16:18 | 62,526,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | """
rllib - Reinforcement Learning Library
Script for running tests. Enables the user to skip slow tests and run only the selected test modules.
Run using
python run_tests.py module1 module2 ... --skipslow
To run coverage analysis run (requires coverage.py to be installed)
coverage run --source ../ run_tests.py module1 module 2 ... --skipslow
Goker Erdogan
https://github.com/gokererdogan/
"""
import unittest
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Run rllib unittests.")
    parser.add_argument('modules', type=str, nargs='+', help='Test module names to run. If discover, '
                                                             'uses unittest.discover to find tests in '
                                                             'the current folder.')
    # note that --skipslow parameter seems to have no effect here but it is checked in TestCase classes using
    # unittest skipIf decorator.
    parser.add_argument('--skipslow', action='store_true', help='Do not run slow tests.')
    args = parser.parse_args()
    loader = unittest.TestLoader()
    # The literal module name 'discover' switches to directory auto-discovery.
    if 'discover' in args.modules:
        tests = loader.discover('./')
    else:
        tests = loader.loadTestsFromNames(args.modules)
    # verbosity=2 prints one line per test case.
    unittest.TextTestRunner(verbosity=2).run(tests)
| [
"gokererdogan@gmail.com"
] | gokererdogan@gmail.com |
763f837f2946f23e4779eb98a1dd18d31f76bad3 | 3f66c9877f0c8a394dbc1fa98dedb9273316b175 | /services/github.py | c9fd6b5f5763e372632c8f203c01c1affbfc053a | [
"MIT"
] | permissive | onejgordon/flow-dashboard | c06f6760d0087cebebe75102b543ac35d7aa8469 | b8d85d9313e51cf386f6d2e5944fc958a7d96769 | refs/heads/develop | 2023-09-03T12:20:57.223724 | 2023-07-02T15:55:02 | 2023-07-02T15:55:02 | 84,657,014 | 1,801 | 250 | MIT | 2023-04-01T02:06:25 | 2017-03-11T14:46:24 | Python | UTF-8 | Python | false | false | 2,131 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# API calls to interact with Github
from google.appengine.api import urlfetch
import base64
import json
import logging
import urllib
from datetime import datetime, timedelta, time
from google.appengine.api import memcache
import tools
from bs4 import BeautifulSoup
BASE = 'https://api.github.com'
REPO_MEMKEY = "GITHUB:%s"
GH_DATE = "%Y-%m-%dT%H:%M:%SZ"
class GithubClient(object):
    """Client for the GitHub REST API and public profile page, authenticated
    with a per-user personal access token stored as integration properties."""
    def __init__(self, user):
        self.user = user
        # Credentials configured per user; either may be None/empty.
        self.pat = self.user.get_integration_prop('github_pat')
        self.github_username = self.user.get_integration_prop('github_username')
    def _can_run(self):
        # True only when both credentials are configured.
        return self.pat and self.github_username
    def _parse_raw_date(self, date):
        # Parse GitHub's ISO-8601 timestamp format (GH_DATE module constant).
        return datetime.strptime(date, GH_DATE)
    def api_call(self, url):
        '''
        Return tuple (response_object, json parsed response)
        '''
        # Relative paths are resolved against the API base URL.
        if not url.startswith('http'):
            url = BASE + url
        auth_header = {"Authorization": "Basic %s" % base64.b64encode("%s:%s" % (self.github_username, self.pat))}
        logging.debug("GET %s" % url)
        response = urlfetch.fetch(url, method="GET", deadline=60, headers=auth_header)
        if response.status_code == 200:
            return (response, json.loads(response.content))
        else:
            # Non-200: log the raw body and return (response, None).
            logging.debug(response.content)
            return (response, None)
    def get_contributions_on_date_range(self, date_range):
        '''
        Currently scraping Github public overview page (no API yet)
        '''
        response = urlfetch.fetch("https://github.com/%s?tab=overview" % self.github_username, deadline=30)
        if response.status_code == 200:
            bs = BeautifulSoup(response.content, "html.parser")
            commits_dict = {}
            for date in date_range:
                iso_date = tools.iso_date(date)
                # NOTE(review): assumes the contribution calendar has a
                # <rect data-date=...> for every requested day; bs.find()
                # returning None would raise AttributeError here -- confirm.
                commits_on_day = bs.find('rect', {'data-date': iso_date}).get('data-count', 0)
                commits_dict[date] = commits_on_day
            return commits_dict
        else:
            # NOTE(review): implicitly returns None on fetch failure.
            logging.error("Error getting contributions")
| [
"onejgordon@gmail.com"
] | onejgordon@gmail.com |
c270c18498a91bafae0c1cd098cfe070ee348d68 | 868e3f5c10a8043134aab7bc3d546b62e3d158c7 | /caffe2/python/operator_test/reduce_ops_test.py | 8595cc0d355c56c54f81892c12c96ed49c93641e | [
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | youyou3/caffe2 | 583cf3c96b5b8bbaa5891aae18b0a07289a14183 | 4f6b8a0cf984ef7ac908f2ec7b3d292ed915ede8 | refs/heads/master | 2021-01-20T15:26:36.207631 | 2018-08-04T08:55:38 | 2018-08-04T08:55:38 | 82,814,821 | 0 | 0 | null | 2017-02-22T14:38:36 | 2017-02-22T14:38:36 | null | UTF-8 | Python | false | false | 1,569 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestReduceFrontSum(hu.HypothesisTestCase):
    """Hypothesis-driven checks for the ReduceFront{Sum,Mean} operators."""
    def reduce_op_test(self, op_name, op_ref, in_data, num_reduce_dims, device):
        # Build the operator under test; it reduces the leading
        # `num_reduce_dims` axes of the input.
        op = core.CreateOperator(
            op_name,
            ["inputs"],
            ["outputs"],
            num_reduce_dim=num_reduce_dims
        )
        # Forward output must match the numpy reference implementation.
        self.assertReferenceChecks(
            device_option=device,
            op=op,
            inputs=[in_data],
            reference=op_ref
        )
        # Numeric gradient check w.r.t. input 0 on output 0.
        self.assertGradientChecks(
            device, op, [in_data], 0, [0], stepsize=1e-2, threshold=1e-2)
    @given(num_reduce_dim=st.integers(1, 3), **hu.gcs)
    def test_reduce_from_sum(self, num_reduce_dim, gc, dc):
        X = np.random.rand(7, 4, 3, 5).astype(np.float32)
        def ref_sum(X):
            # Sum over the leading `num_reduce_dim` axes.
            return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]
        self.reduce_op_test("ReduceFrontSum", ref_sum, X, num_reduce_dim, gc)
    @given(num_reduce_dim=st.integers(1, 3), **hu.gcs)
    def test_reduce_from_mean(self, num_reduce_dim, gc, dc):
        X = np.random.rand(6, 7, 8, 2).astype(np.float32)
        def ref_mean(X):
            # Mean over the leading `num_reduce_dim` axes.
            return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]
        self.reduce_op_test("ReduceFrontMean", ref_mean, X, num_reduce_dim, gc)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
0f43ddb205e564af927fdbd2a8373be27ac57c82 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/pyzmq/py2/zmq/green/device.py | 4b070237e312255dd3815cc49cf58b4f5529ca47 | [
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-zeromq-exception-lgpl-3.0",
"BSD-3-Clause"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 950 | py | # Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import zmq
from zmq.green import Poller
def device(device_type, isocket, osocket):
    """Start a zeromq device (gevent-compatible).
    Unlike the true zmq.device, this does not release the GIL.

    Parameters
    ----------
    device_type : (QUEUE, FORWARDER, STREAMER)
        The type of device to start (ignored).
    isocket : Socket
        The Socket instance for the incoming traffic.
    osocket : Socket
        The Socket instance for the outbound traffic.
    """
    poller = Poller()
    # A sentinel of -1 means "echo traffic back on the input socket".
    if osocket == -1:
        osocket = isocket
    for sock in (isocket, osocket):
        poller.register(sock, zmq.POLLIN)
    # Forward multipart messages in both directions forever.
    while True:
        ready = dict(poller.poll())
        if isocket in ready:
            osocket.send_multipart(isocket.recv_multipart())
        if osocket in ready:
            isocket.send_multipart(osocket.recv_multipart())
| [
"akhropov@yandex-team.com"
] | akhropov@yandex-team.com |
bbafea7bc0e26f656674d86b73ec98bea6ca6bc5 | 9573a059adc7e5524cfdc4578ac5440be1878a62 | /examples/benchmarks/json/parsers/parsimonious_json.py | 90f96a73f8e5691fe9c5320865967900e6ae8495 | [
"MIT"
] | permissive | eerimoq/textparser | a1764fa06262b1355927a6573ebc7a8f4c51d482 | 1ef809eb283da3c3ec7b8bc682f11eeada3a81d6 | refs/heads/master | 2022-04-29T22:42:30.462809 | 2022-04-16T09:00:09 | 2022-04-16T09:00:09 | 141,811,843 | 32 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | """Based on
https://gist.github.com/goodmami/686385b4b39a3bac00fbbe78a5cda6c8, by
Michael Wayne Goodman.
"""
import timeit
from parsimonious.grammar import Grammar
grammar = Grammar(
r"""
Start = ~"\s*" ( Object / Array ) ~"\s*"
Object = ~"{\s*" Members? ~"\s*}"
Members = MappingComma* Mapping
MappingComma = Mapping ~"\s*,\s*"
Mapping = DQString ~"\s*:\s*" Value
Array = ~"\[\s*" Items? ~"\s*\]"
Items = ValueComma* Value
ValueComma = Value ~"\s*,\s*"
Value = Object / Array / DQString
/ TrueVal / FalseVal / NullVal / Float / Integer
TrueVal = "true"
FalseVal = "false"
NullVal = "null"
DQString = ~"\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""
Float = ~"[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"
Integer = ~"[-+]?\d+"
""")
def parse_time(json_string, iterations):
    """Return the total seconds taken to parse *json_string* *iterations* times."""
    return timeit.timeit(lambda: grammar.parse(json_string), number=iterations)
def parse(json_string):
    """Parse *json_string* with the module-level PEG grammar and return the tree."""
    return grammar.parse(json_string)
def version():
    """Return the parser version string (parsimonious exposes none)."""
    return 'unknown'
| [
"erik.moqvist@gmail.com"
] | erik.moqvist@gmail.com |
d29a70b5ace0f73e09a5fdd23a57fd314c755741 | 849a174efea976d4daed419b85668c2ba05fd2b9 | /algorithms/dynamic/binomial_coefficient_2.py | 97d61b41fa2af5a1780910a98b525b1ae8fa6082 | [] | no_license | samyuktahegde/Python | 61e6fedbdd2a94b29e4475621afa6d5e98bf49b8 | b02fa6e908661a918e0024f508df0192d5553411 | refs/heads/master | 2018-09-18T20:27:55.980689 | 2018-08-09T05:49:33 | 2018-08-09T05:49:33 | 116,491,078 | 0 | 0 | null | 2018-02-05T05:33:53 | 2018-01-06T14:52:16 | null | UTF-8 | Python | false | false | 403 | py | def binomial_coeffient(n, k):
def binomial_coeffient(n, k):
    """Return C(n, k) via Pascal's-triangle dynamic programming.

    Returns 0 when k > n (those table cells are never filled in).
    Note: the misspelled name is kept for backward compatibility.
    """
    table = [[0] * (k + 1) for _ in range(n + 1)]
    for row in range(n + 1):
        for col in range(min(row, k) + 1):
            if col == 0 or col == row:
                # Edges of Pascal's triangle are always 1.
                table[row][col] = 1
            else:
                table[row][col] = table[row - 1][col - 1] + table[row - 1][col]
    return table[n][k]
# Demo: prints "Value of C[4][2] is 6".
n = 4
k = 2
print("Value of C[" + str(n) + "][" + str(k) + "] is " + str(binomial_coeffient(n,k)))
| [
"noreply@github.com"
] | samyuktahegde.noreply@github.com |
dcdeadf0874314325430c005262caf34f5d57e95 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_chequebooks.py | a64bccb00541ab2be6bdffd65c502995c5fc16bc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py |
from xai.brain.wordbase.nouns._chequebook import _CHEQUEBOOK
# class header
class _CHEQUEBOOKS(_CHEQUEBOOK, ):
    """Plural word entry; inherits all behaviour from the singular form."""
    def __init__(self,):
        _CHEQUEBOOK.__init__(self)
        # Override only the identity fields of the base word.
        self.name = "CHEQUEBOOKS"
        self.specie = 'nouns'
        self.basic = "chequebook"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
43683693c70eb2d9afe8f668fac78a3d2a457a53 | 20674c17d815214bf66b75be686bb8a45c0f5914 | /version1/884_Uncommon_Words_from_Two_Sentences.py | 8b901dd8a2004bae8830fb7a37a2c6d05e11ca06 | [] | no_license | moontree/leetcode | e7b670969fe20785b15aae82996875fd66de1b08 | f2bf9b13508cd01c8f383789569e55a438f77202 | refs/heads/master | 2021-05-20T20:36:45.615420 | 2020-04-02T09:15:26 | 2020-04-02T09:15:26 | 252,408,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | """
We are given two sentences A and B.
(A sentence is a string of space separated words.
Each word consists only of lowercase letters.)
A word is uncommon if it appears exactly once in one of the sentences,
and does not appear in the other sentence.
Return a list of all uncommon words.
You may return the list in any order.
Example 1:
Input:
A = "this apple is sweet",
B = "this apple is sour"
Output:
["sweet","sour"]
Example 2:
Input:
A = "apple apple",
B = "banana"
Output:
["banana"]
Note:
0 <= A.length <= 200
0 <= B.length <= 200
A and B both contain only spaces and lowercase letters.
"""
class Solution(object):
    def uncommonFromSentences(self, A, B):
        """Return the words that appear exactly once across both sentences.

        A word is "uncommon" iff its total count over A and B is 1, which
        implies it occurs once in one sentence and never in the other.
        Order of the result is unspecified (per the problem statement).

        :type A: str
        :type B: str
        :rtype: List[str]
        """
        from collections import Counter
        # Counting both sentences together collapses the two conditions
        # ("once in one" and "absent from the other") into count == 1.
        counts = Counter(A.split()) + Counter(B.split())
        return [word for word, total in counts.items() if total == 1]
# Test fixtures: each entry pairs keyword arguments with the expected result.
examples = [
    {
        "input": {
            "A": "this apple is sweet",
            "B": "this apple is sour"
        },
        "output": ["sweet", "sour"]
    }, {
        "input": {
            "A": "apple apple",
            "B": "banana"
        },
        "output": ["banana"]
    }
]
# Python 2 driver: runs every public method of Solution against all examples.
# NOTE(review): the equality check is order-sensitive even though the problem
# allows any order, so a correct answer may still print False.
if __name__ == '__main__':
    solution = Solution()
    for n in dir(solution):
        if not n.startswith('__'):
            func = getattr(solution, n)
            print(func)
            for example in examples:
                print '----------'
                v = func(**example['input'])
                print v, v == example['output']
"zhangchao@zhangchaodeMacBook-Pro.local"
] | zhangchao@zhangchaodeMacBook-Pro.local |
186b4e5def4f17606a3f1234fae8c50ac9b8dfb9 | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /study_scripts/python_learning_book/timeseqs.py | b235e262e790b0fcbadc02727377627af9a9e387 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | #!/usr/bin/env python
# File timeseqs.py
import sys, mytimer
reps = 10000
repslist = range(reps)
def forLoop():
    """Benchmark body: build the abs() list with an explicit for/append loop."""
    out = []
    for item in repslist:
        out.append(abs(item))
    return out
def listComp():
    """Benchmark body: build the abs() list with a list comprehension."""
    return [abs(item) for item in repslist]
def mapCall():
    """Benchmark body: build the abs() list via map()."""
    mapped = map(abs, repslist)
    return list(mapped)
def genExpr():
    """Benchmark body: build the abs() list from a generator expression."""
    lazy = (abs(item) for item in repslist)
    return list(lazy)
def genFunc():
    """Benchmark body: build the abs() list from a generator function."""
    def produce():
        for item in repslist:
            yield abs(item)
    return list(produce())
print(sys.version)
# Time every builder with both timing strategies and print a summary line.
# NOTE(review): the last two lines use Python 2 print statements (the
# trailing `% (...)` would be a TypeError under Python 3).
for tester in (mytimer.timer, mytimer.best):
    print('<%s>' % tester.__name__)
    for test in (forLoop, listComp, mapCall, genExpr, genFunc):
        elapsed, result = tester(test)
        print ('-' * 35)
        print ('%-9s: %.5f => [%s...%s]') % (test.__name__, elapsed, result[0], result[-1])
| [
"stefan_bo@163.com"
] | stefan_bo@163.com |
df0043829249d008722b8d6b71f36bc0d9f853ff | cf43a84a9f1ea5983c63a14f55a60c2c6f5bbedb | /setup.py | cc0e1a8f1ca2ea5fb3daf9267f5baae0e3561852 | [] | no_license | research-core/core-people | 66f6862527755c09d537cc7740af5ebc470b9b83 | f017002d39d1112415ce326d6aeb0b86fba6293b | refs/heads/master | 2020-06-29T03:16:20.229962 | 2019-08-29T14:30:39 | 2019-08-29T14:30:39 | 200,423,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Packaging script: reads version/license out of the package's __init__.py
# so there is a single source of truth for both values.
import re
from setuptools import setup, find_packages
# NOTE(review): `license` shadows the `license` builtin; harmless in a
# short setup script but worth renaming if this grows.
version, license = None, None
with open('people/__init__.py', 'r') as fd:
    content = fd.read()
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
    license = re.search(r'^__license__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
if version is None: raise RuntimeError('Cannot find version information')
if license is None: raise RuntimeError('Cannot find license information')
with open('README.md', 'r') as fd:
    long_description = fd.read()
setup(
    name='core-people',
    version=version,
    description='Research CORE ERM - people module',
    author='Ricardo Ribeiro, Hugo Cachitas',
    author_email='ricardojvr@gmail.com, hugo.cachitas@research.fchampalimaud.org',
    url='https://github.com/research-core/core-people',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    license=license,
    install_requires=['core-common'],
    package_data={
        'people': [
            'fixtures/initial_data.yaml',
            'static/img/*.png',
            'static/*.png',
        ]
    },
)
| [
"ricardojvr@gmail.com"
] | ricardojvr@gmail.com |
5e22581eb4731d03a947c87cd853a93c1fbe0dab | 5a6d7fb808de598a9a13f90855d13ea7c8212f92 | /deeptech/training/optimizers/_smart_optimizer.py | d9f6fbc4d4a1581f8d2b9235aac483443d057d7e | [
"MIT"
] | permissive | penguinmenac3/deeptech | 0b4f48c59b6e4b1d00a2d5b3d876c27ab3f4f2da | 0c7fb170d62f193dbbb2018f7b8d42f713178bb8 | refs/heads/main | 2023-03-08T17:44:30.222433 | 2021-03-02T21:28:46 | 2021-03-02T21:28:46 | 302,860,310 | 1 | 0 | MIT | 2021-01-11T22:12:58 | 2020-10-10T09:04:01 | Python | UTF-8 | Python | false | false | 1,019 | py | """doc
# deeptech.training.optimizers._smart_optimizer
> Automatically create an optimizer with the parameters of the model.
"""
from deeptech.core.config import inject_kwargs
def smart_optimizer(optimizer, *args, **kwargs):
    """
    Convert a pytorch optimizer into a lambda function that expects the config, model and loss as parameters, to instantiate the optimizer with all trainable parameters.
    :param optimizer: A pytorch optimizer that should be made smart.
    :param *args: Any ordered arguments the original optimizer expects.
    :param **kwargs: Any named arguments the original optimizer expects.
    """
    @inject_kwargs()
    def create_optimizer(model, loss, training_initial_lr=None):
        # Optimize the model's parameters together with any trainable
        # parameters the loss itself may carry.
        trainable = list(model.parameters()) + list(loss.parameters())
        return optimizer(trainable, training_initial_lr, *args, **kwargs)
    return create_optimizer
| [
"mail@michaelfuerst.de"
] | mail@michaelfuerst.de |
4c7ba0f6cd28b20062f6c3264106a480b402adcd | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /kjph2fGDWmLKY2n2J_3.py | b6224dd24a7a90e281db3dff5da27cfb06543ce7 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
import re
def valid_color (color):
    """Return True iff *color* is a well-formed rgb()/rgba() CSS literal.

    Whitespace inside the value list is tolerated (it is stripped before the
    regex runs), but the literal must still start with "rgb(" or "rgba(",
    i.e. no space between the function name and the opening parenthesis.
    """
    pattern = "rgb(a)?\((,?((?=\d{1,3}\%)(100|\d{1,2})\%|(25[0-5]|1?\d{1,2}))){3}(?(1),(1\.0*|0?\.?\d+))\)"
    compact = "".join(color.split())
    matches = bool(re.match(pattern, compact))
    has_prefix = color.startswith("rgb(") or color.startswith("rgba(")
    return matches and has_prefix
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9539b719de78e4f40c061393f9735ce08c61d9fe | f797aecc0a7847aefc228c097a86ffc9cc1f5cc3 | /hongkong/hongkong/spiders/HKEX_delisted_company_list.py | a12c31e656ad7825a62a1af59965e0e2c9dde4cf | [] | no_license | waynecanfly/spiderItemV2 | b359ac773bb9fbfbf4f893704d542654bd3994e3 | 972a5fb002d051a2630b40c9e6582392daf22d0f | refs/heads/master | 2020-10-01T08:35:35.888512 | 2019-12-14T02:52:06 | 2019-12-14T02:52:06 | 227,500,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | # -*- coding: utf-8 -*-
import time
import scrapy
from hongkong.items import HongKongDelistedCompanyItem
from samples.base_rule import HKEXIsNewDelistedSec
class HkexDelistedCompanyListSpider(scrapy.Spider):
    '''Fetch the list of delisted HKEX companies from webb-site.com (获取退上市公司列表).'''
    name = 'HKEX_delisted_company_list'
    allowed_domains = ['webb-site.com']
    start_urls = ['http://webb-site.com/']
    # Board name -> listing URL (e=m selects the Main Board, e=g selects GEM).
    market_url_dict = {
        'main': 'https://webb-site.com/dbpub/delisted.asp?s=nameup&t=s&e=m',
        'gem': 'https://webb-site.com/dbpub/delisted.asp?s=nameup&t=s&e=g'
    }
    @staticmethod
    def _first_or_null(values):
        """Return the first extracted xpath value, or the 'Null' placeholder."""
        return values[0] if values else 'Null'
    def start_requests(self):
        # Invert the mapping so each request carries the board it came from.
        url_to_market = {v: k for k, v in self.market_url_dict.items()}
        for url in self.market_url_dict.values():
            yield scrapy.Request(url=url, callback=self.parse, meta={
                'bond_info': url_to_market[url],
            })
    def parse(self, response):
        """Yield one item per delisted-security row of the results table."""
        market_type = response.meta['bond_info']
        infos = response.xpath("//body/div[@class='mainbody']/table[@class='numtable']")
        info_list = infos.xpath("//tr")
        info_list.pop(0)  # drop the table header row
        for info in info_list:
            # Any cell may be missing; fall back to the 'Null' placeholder.
            stock_code = self._first_or_null(info.xpath("./td[2]/a/text()").extract())
            issuer = self._first_or_null(info.xpath("./td[4]/a/text()").extract())
            first_trade = self._first_or_null(info.xpath("./td[5]/text()").extract())
            last_trade = self._first_or_null(info.xpath("./td[6]/text()").extract())
            delisted_date = self._first_or_null(info.xpath("./td[7]/text()").extract())
            trading_life_years = self._first_or_null(info.xpath("./td[8]/text()").extract())
            reason = self._first_or_null(info.xpath("./td[9]/text()").extract())
            gmt_create = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            # Only emit securities not already recorded as delisted.
            if not HKEXIsNewDelistedSec(stock_code):
                item = HongKongDelistedCompanyItem()
                item['country_code'] = 'HKG'
                item['exchange_market_code'] = 'HKEX'
                item['security_code'] = stock_code
                item['issuer'] = issuer
                item['first_trade'] = first_trade
                item['market_type'] = market_type
                item['last_trade'] = last_trade
                item['delisting_date'] = delisted_date
                item['trading_life_years'] = trading_life_years
                item['status'] = -2
                item['reason'] = reason
                item['gmt_create'] = gmt_create
                item['user_create'] = 'cf'
                yield item
"1370153124@qq.com"
] | 1370153124@qq.com |
2529c9992a9b2928cc03b54996a982008abac123 | f516b7561b93f640bcb376766a7ecc3440dcbb99 | /leetcode/easy/remove-duplicates-from-sorted-array.py | d220514e45221042eecda3ade154765409ac3fa4 | [
"Apache-2.0"
] | permissive | vtemian/interviews-prep | c41e1399cdaac9653c76d09598612f7450e6d302 | ddef96b5ecc699a590376a892a804c143fe18034 | refs/heads/master | 2020-04-30T15:44:42.116286 | 2019-09-10T19:41:41 | 2019-09-10T19:41:41 | 176,928,167 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
    """Dedupe the sorted list *nums* in place.

    Writes the unique values into the prefix of *nums* and returns the
    length of that prefix; elements beyond it are left untouched.

    :type nums: List[int]
    :rtype: int
    """
    if not nums:
        return 0
    write = 1
    for read in range(1, len(nums)):
        # A value differing from the last kept one starts a new unique run.
        if nums[read] != nums[write - 1]:
            nums[write] = nums[read]
            write += 1
    return write
| [
"vladtemian@gmail.com"
] | vladtemian@gmail.com |
54d9fd3cbc0666b68171746496af21098c4d479a | b26674cda3264ad16af39333d79a700b72587736 | /corehq/apps/change_feed/producer.py | 7f48ff3b7aa3399ac0b571e40e188b2d41e61c2b | [] | no_license | tlwakwella/commcare-hq | 2835206d8db84ff142f705dbdd171e85579fbf43 | a3ac7210b77bea6c2d0392df207d191496118872 | refs/heads/master | 2021-01-18T02:07:09.268150 | 2016-03-24T14:12:49 | 2016-03-24T14:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | from __future__ import unicode_literals
import json
import time
from corehq.util.soft_assert import soft_assert
from kafka import KeyedProducer
from kafka.common import LeaderNotAvailableError, FailedPayloadsError, KafkaUnavailableError
from corehq.apps.change_feed.connection import get_kafka_client_or_none
import logging
def send_to_kafka(producer, topic, change_meta):
    """Publish *change_meta* to *topic*, keyed by its domain, with retries.

    Transient kafka failures are retried up to 3 times; when the partition
    leader is unavailable the function sleeps briefly and re-raises; any
    other failure soft-asserts (emailing a maintainer) and re-raises so the
    caller still sees the error.
    """
    def _send_to_kafka():
        # NOTE(review): Python 2 semantics assumed -- bytes() is str() there,
        # so a None domain becomes the literal key "None". Confirm before a
        # Python 3 port (bytes(str) raises TypeError on py3).
        producer.send_messages(
            bytes(topic),
            bytes(change_meta.domain.encode('utf-8') if change_meta.domain is not None else None),
            bytes(json.dumps(change_meta.to_json())),
        )
    try:
        tries = 3
        for i in range(tries):
            # try a few times because the python kafka libraries can trigger timeouts
            # if they are idle for a while.
            try:
                _send_to_kafka()
                break
            except (FailedPayloadsError, KafkaUnavailableError):
                if i == (tries - 1):
                    # if it's the last try, fail hard
                    raise
    except LeaderNotAvailableError:
        # kafka seems to be down. sleep a bit to avoid crazy amounts of error spam
        time.sleep(15)
        raise
    except Exception as e:
        # Alert a maintainer, then still propagate the failure to the caller.
        _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
        _assert(False, 'Problem sending change to kafka {}: {} ({})'.format(
            change_meta.to_json(), e, type(e)
        ))
        raise
class ChangeProducer(object):
    """Lazily connects to Kafka and publishes change-feed messages.

    When Kafka cannot be reached, the error is flagged once (with a warning)
    and subsequent sends become no-ops instead of retrying the connection.
    """
    def __init__(self, kafka=None):
        self._kafka = kafka
        self._producer = None
        self._has_error = False
    @property
    def kafka(self):
        """The Kafka client, created on first access; None if unavailable."""
        # Load everything lazily to avoid doing this work if not needed.
        if self._kafka is None and not self._has_error:
            client = get_kafka_client_or_none()
            if client is None:
                logging.warning('Kafka is not available! Change producer is doing nothing.')
                self._has_error = True
            self._kafka = client
        return self._kafka
    @property
    def producer(self):
        """The keyed producer, created on first access; None on error."""
        if self._producer is None and not self._has_error:
            client = self.kafka
            if client is not None:
                self._producer = KeyedProducer(client)
            else:
                # A None client means the kafka property just set the flag.
                assert self._has_error
        return self._producer
    def send_change(self, topic, change_meta):
        """Publish one change-metadata message, if Kafka is reachable."""
        kafka_producer = self.producer
        if kafka_producer:
            send_to_kafka(kafka_producer, topic, change_meta)


# Module-level singleton shared by all callers.
producer = ChangeProducer()
| [
"czue@dimagi.com"
] | czue@dimagi.com |
fca11b46debbd03649c2ea39df9ed1f363eb9fa5 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-as/huaweicloudsdkas/v1/model/bandwidth_result.py | 7ed77075e6464937dd2977fd447ada089a4289e8 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BandwidthResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'size': 'int',
'share_type': 'str',
'charging_mode': 'str',
'id': 'str'
}
attribute_map = {
'size': 'size',
'share_type': 'share_type',
'charging_mode': 'charging_mode',
'id': 'id'
}
def __init__(self, size=None, share_type=None, charging_mode=None, id=None):
"""BandwidthResult - a model defined in huaweicloud sdk"""
self._size = None
self._share_type = None
self._charging_mode = None
self._id = None
self.discriminator = None
if size is not None:
self.size = size
if share_type is not None:
self.share_type = share_type
if charging_mode is not None:
self.charging_mode = charging_mode
if id is not None:
self.id = id
@property
def size(self):
"""Gets the size of this BandwidthResult.
带宽(Mbit/s)。
:return: The size of this BandwidthResult.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this BandwidthResult.
带宽(Mbit/s)。
:param size: The size of this BandwidthResult.
:type: int
"""
self._size = size
@property
def share_type(self):
"""Gets the share_type of this BandwidthResult.
带宽的共享类型。共享类型枚举:PER,表示独享。目前只支持独享。
:return: The share_type of this BandwidthResult.
:rtype: str
"""
return self._share_type
@share_type.setter
def share_type(self, share_type):
"""Sets the share_type of this BandwidthResult.
带宽的共享类型。共享类型枚举:PER,表示独享。目前只支持独享。
:param share_type: The share_type of this BandwidthResult.
:type: str
"""
self._share_type = share_type
@property
def charging_mode(self):
"""Gets the charging_mode of this BandwidthResult.
带宽的计费类型。字段值为“bandwidth”,表示按带宽计费。字段值为“traffic”,表示按流量计费。
:return: The charging_mode of this BandwidthResult.
:rtype: str
"""
return self._charging_mode
@charging_mode.setter
def charging_mode(self, charging_mode):
"""Sets the charging_mode of this BandwidthResult.
带宽的计费类型。字段值为“bandwidth”,表示按带宽计费。字段值为“traffic”,表示按流量计费。
:param charging_mode: The charging_mode of this BandwidthResult.
:type: str
"""
self._charging_mode = charging_mode
@property
def id(self):
"""Gets the id of this BandwidthResult.
带宽ID,创建WHOLE类型带宽的弹性IP时指定的共享带宽。
:return: The id of this BandwidthResult.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this BandwidthResult.
带宽ID,创建WHOLE类型带宽的弹性IP时指定的共享带宽。
:param id: The id of this BandwidthResult.
:type: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BandwidthResult):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Defined explicitly because Python 2 (supported via six above) does
        # not derive __ne__ from __eq__ automatically.
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
593e23c23bff91b3481cdb1e25deadc33108d1dc | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays13CstmLiqEtlp_pP.py | 684b0d0f0124a7bc311424edfc52f261b9f1d358 | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | def LiqEtlp_pP(P,T,x_N2):
x = (P-5.56380000e+02)/3.71707300e-01
y = (T--1.77068007e+02)/5.35543333e-02
z = (x_N2-9.17851528e-01)/8.26935123e-03
output = \
1*2.41243980e+01
liq_etlp = output*1.00000000e+00+0.00000000e+00
return liq_etlp | [
"1052632241@qq.com"
] | 1052632241@qq.com |
ee43bb18ca6eafa1bfb32ed0af4d657f0fad1558 | 33211b03bc7c0e13ad2b39938e99851ad206332e | /ragdoll/__init__.py | 506256fa48165e55e0b48b94f152cb273f8974be | [] | no_license | wangsen992/ragdoll | fe341f0c6acc9842e9b81851cf8ca864f87b0352 | cdd4bd8a7fa5286af8749c0157fe51653543a033 | refs/heads/master | 2021-06-23T00:23:54.738709 | 2017-07-25T04:26:05 | 2017-07-25T04:26:05 | 94,606,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | """
Ragdoll initialisation.
"""
from .db import *
from .composite import *
from .nutrient import *
from .flyweight import *
from .dictionary import *
from .human import *
from .req import *
from .plots import *
__version__=0.1 | [
"wangsen992@gmail.com"
] | wangsen992@gmail.com |
d43f17b2a9272f382d3b9edea82ed5030ed82918 | 17be0e9275082c3239fedc11bc617ecd5856136c | /letor/offline/query_characterizer.py | ba3ab26b9b27e8d9bd178eff043fddddbf869187 | [] | no_license | mdkmongo/semantichealth.github.io | 8bb814bfd3b0b3a71828625a2acebfd8013e2eef | 6462ba2cc406967b0371b09822e4c26860e96c91 | refs/heads/master | 2021-01-21T08:24:07.128484 | 2016-08-19T05:35:04 | 2016-08-19T05:35:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | from sklearn.feature_extraction.text import CountVectorizer
from get_query_clusters import *
import numpy as np
def query_characterizer(queries, log, similarity_limit = 0.9):
    """Cluster text queries by their term usage.

    queries - sequence of query strings (must expose .shape, e.g. a pandas
              Series or numpy array)
    log - logger exposing a trace() method
    similarity_limit - keep increasing the cluster count until the minimum
              within-cluster similarity reaches this threshold

    Returns (cluster id per query, term vocabulary, centroid matrix).
    """
    log.trace('characterizing %d queries' %queries.shape[0])
    vectorizer = CountVectorizer()
    term_matrix = vectorizer.fit_transform(queries)
    # Binarize the counts: duplicate terms within one query are ignored.
    term_matrix.data = np.ones(term_matrix.data.size)
    # Grow k until every cluster is tight enough.
    num_clusters = 0
    min_sim = 0
    while min_sim < similarity_limit:
        num_clusters += 1
        clusters, min_sim, centroids = get_query_clusters(term_matrix, num_clusters, log)
        log.trace('characterizing queries with k = %d, minimum similarity is %.4f' %(num_clusters, min_sim))
    return clusters, vectorizer.vocabulary_, centroids.toarray()
| [
"ynglei@gmail.com"
] | ynglei@gmail.com |
ea4fb014273cdd117adbdffd61693eb7335a22b3 | 8324db17c426d83d95ce668ee6c7914eec4c7cc1 | /app/user/v1/services/otp_service.py | 44e93ee257a4d04001b22875deeff64064e8f431 | [] | no_license | iCodeIN/whatsapp_clone_backend | 21224f994a7d6e901aeff16f3c461bab72d720a3 | abcbb8fad81feb5e697af61277a21bc99e3ca81b | refs/heads/master | 2023-02-23T17:28:15.960392 | 2021-01-25T16:29:06 | 2021-01-25T16:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | from helpers.cache_adapter import CacheAdapter
from helpers.misc_helper import get_random_number
class OTPService():
    """Issues and manages one-time passwords (OTPs) for a mobile number,
    backed by the project's cache adapter."""
    def __init__(self, mobile_number):
        self.mobile_number = mobile_number
        self.OTP_PREFIX = 'OTP_'
        self.OTP_EXPIRY = 600  # cache lifetime, in seconds
        self.cache_adapter = CacheAdapter()
    def _cache_key(self):
        # Cache key under which this number's OTP is stored.
        return self.OTP_PREFIX + self.mobile_number
    def get_otp(self):
        """Return the OTP currently cached for this number (or None)."""
        return self.cache_adapter.get(self._cache_key())
    def clear_otp(self):
        """Remove any cached OTP for this number."""
        self.cache_adapter.delete(self._cache_key())
    def generate_otp(self):
        """Return the OTP for this number, generating one if none is cached.

        The value is (re-)written to the cache on every call, which also
        refreshes its expiry window.
        """
        cache_key = self._cache_key()
        one_time_password = self.cache_adapter.get(cache_key)
        if one_time_password is None:
            one_time_password = get_random_number()
        self.cache_adapter.set(
            cache_key,
            one_time_password,
            self.OTP_EXPIRY
        )
        return one_time_password
| [
"sajal.4591@gmail.com"
] | sajal.4591@gmail.com |
7d5b92bd2af2037c9ecef1441beb9ad0fb39fa58 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/classification/GoogleNet_ID1623_for_PyTorch/demo.py | 61e10d8b90967fcf483c8eac545a688daeaa25ac | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,723 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# -*- coding: utf-8 -*-
"""demo.py
"""
import os
import torch
import numpy as np
from googlenet import googlenet
import argparse
from apex import amp
import apex
import torch.distributed as dist
parser = argparse.ArgumentParser(description=' googlenet demo ')
parser.add_argument('--device', default='npu', type=str,
help='npu or gpu')
parser.add_argument('--device-list', default='0,1,2,3,4,5,6,7', type=str, help='device id list')
parser.add_argument('--dist-backend', default='hccl', type=str,
help='distributed backend')
parser.add_argument('--addr', default='192.168.88.3', type=str,
help='master addr')
'''
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.amp:
amp.load_state_dict(checkpoint['amp'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
'''
def device_id_to_process_device_map(device_list):
    """Map consecutive process ranks to sorted device ids.

    :param device_list: comma-separated device ids, e.g. "0,1,2"
    :return: dict {process_rank: device_id} with ids in ascending order
    """
    sorted_ids = sorted(int(token) for token in device_list.split(","))
    return {rank: device_id for rank, device_id in enumerate(sorted_ids)}
def build_model():
    """Build the distributed GoogleNet model on an Ascend NPU and load
    ./checkpoint.pth.tar, returning the model in eval mode.

    Side effect: sets the module-level ``loc`` to the chosen npu device
    string so the caller (see __main__) can move inputs to the same device.
    """
    global loc
    # Customize the model here and load the pretrained weights.
    args = parser.parse_args()
    args.process_device_map = device_id_to_process_device_map(args.device_list)
    # Rendezvous settings for the single-process distributed group.
    os.environ['MASTER_ADDR'] = args.addr
    os.environ['MASTER_PORT'] = '29688'
    ngpus_per_node = len(args.process_device_map)  # NOTE(review): unused
    dist.init_process_group(backend=args.dist_backend, # init_method=args.dist_url,
                            world_size=1, rank=0)
    # Use the first device of the configured device list.
    args.gpu = args.process_device_map[0]
    loc = 'npu:{}'.format(args.gpu)
    torch.npu.set_device(loc)
    model = googlenet().to(loc)
    optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), 0.5,
                                            momentum=0.9,
                                            weight_decay=1.0e-04)
    # Mixed precision (O2) with a fixed loss scale, then DDP wrapping.
    model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=1024)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], broadcast_buffers=False)
    checkpoint = torch.load('./checkpoint.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval() # make sure inference runs in eval mode
    return model
def get_raw_data():
    """Download a sample image and return it as an RGB PIL image.

    Note: the raw data must not be committed to the repository; it is
    fetched on demand and stored as a temporary local file.
    """
    from PIL import Image
    from urllib.request import urlretrieve
    IMAGE_URL = 'https://bbs-img.huaweicloud.com/blogs/img/thumb/1591951315139_8989_1363.png'
    urlretrieve(IMAGE_URL, 'tmp.jpg')
    image = Image.open("tmp.jpg")
    return image.convert('RGB')
def pre_process(raw_data):
    """Convert a PIL image into a normalized 4-D inference tensor.

    Resize to 256, center-crop to 224, convert to a tensor, normalize with
    the standard ImageNet statistics, and add a leading batch dimension.
    """
    from torchvision import transforms
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    return pipeline(raw_data).unsqueeze(0)
def post_process(output_tensor):
    """Print the raw logits and return the arg-max class index per row."""
    print(output_tensor)
    return output_tensor.argmax(dim=1)
if __name__ == '__main__':
    # 1. Fetch the raw input image (downloaded sample).
    raw_data = get_raw_data()
    # 2. Build the model (also sets the global ``loc`` device string).
    model = build_model()
    # 3. Preprocess into a normalized input tensor.
    input_tensor = pre_process(raw_data)
    # 4. Run the forward pass on the selected device.
    output_tensor = model(input_tensor.to(loc))
    # 5. Postprocess: arg-max over the class logits.
    result = post_process(output_tensor)
    # 6. Print the predicted class index.
    print(result)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
d7c061e7a434f605159fe0952b9fbb9232f37412 | 6e7aa175667d08d8d285fd66d13239205aff44ff | /libs/pyglet/baby/entity.py | d799609c37fe7f8d8f458034e4882b32c3f80c75 | [] | no_license | jaredly/GameCC | c2a9d7b14fc45813a27bdde86e16a3e3594396e2 | babadbe9348c502d0f433fb82e72ceff475c3a3b | refs/heads/master | 2016-08-05T10:53:03.794386 | 2010-06-25T03:13:02 | 2010-06-25T03:13:02 | 269,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
import pyglet
from pyglet import *
class Entity(object):
    """A renderable entity drawn as a colored triangle using OpenGL
    immediate mode. GL names are expected from the ``pyglet`` star import
    above — NOTE(review): verify pyglet re-exports them at top level."""
    def __init__(self, id, size, x, y, rot):
        self.id = id      # caller-supplied identifier (not used when drawing)
        self.size = size  # uniform scale factor
        self.x = x        # position
        self.y = y
        self.rot = rot    # rotation about the z axis, degrees (glRotatef)
    def draw(self):
        """Draw the triangle; rebuilds the modelview transform each call.
        The GL call order below is significant — do not reorder."""
        glLoadIdentity()
        glTranslatef(self.x, self.y, 0.0)
        glRotatef(self.rot, 0, 0, 1)
        glScalef(self.size, self.size, 1.0)
        glBegin(GL_TRIANGLES)
        # Apex vertex: red, alpha 0; the two base vertices: blue, alpha 1.
        glColor4f(1.0, 0.0, 0.0, 0.0)
        glVertex2f(0.0, 0.5)
        glColor4f(0.0, 0.0, 1.0, 1.0)
        glVertex2f(0.2, -0.5)
        glColor4f(0.0, 0.0, 1.0, 1.0)
        glVertex2f(-0.2, -0.5)
        glEnd()
# vim: et sw=4 sts=4
| [
"jared@jaredforsyth.com"
] | jared@jaredforsyth.com |
1e03619a331e14654fa38e66498e961c5be32f57 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4308/codes/1635_2445.py | a6e6742d111f1323488ecfe4d2d366cb270ef44c | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | t = input("temp em celsius c / F: ")
vt = float(input("valor da temp: "))
# "F": treat vt as Fahrenheit and convert it to Celsius.
if (t == "F"):
	v = 5 / 9 * (vt - 32)
# "C": treat vt as Celsius and convert it to Fahrenheit.
# NOTE(review): if t is neither "C" nor "F" (e.g. lowercase input), v is
# never bound and the final print raises NameError.
if (t == "C"):
	v = (vt *(9 / 5)) + 32
print(round(v , 2)) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
b1f8ede7e93347ddc25e9082984a18a3e6fc2ee9 | 41c26da9c57052a3c9cd17b81d91f41ef074cf8d | /MyLeetCode/python/Remove Duplicates from Sorted Array II.py | fe24b6747a5ccd8a30d9d99b4a59544ec3d72026 | [] | no_license | ihuei801/leetcode | a82f59a16574f4781ce64a5faa099b75943de94e | fe79161211cc08c269cde9e1fdcfed27de11f2cb | refs/heads/master | 2021-06-08T05:12:53.934029 | 2020-05-07T07:22:25 | 2020-05-07T07:22:25 | 93,356,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | ###
# Time Complexity: O(n)
# Space Complexity: O(1)
###
class Solution(object):
    def removeDuplicates(self, nums):
        """
        In-place: keep at most two copies of each value in the sorted list
        nums and return the new logical length; elements past the returned
        length are unspecified.
        :type nums: List[int]
        :rtype: int
        """
        # Fewer than 3 elements can never hold a third duplicate.
        if len(nums) < 3:
            return len(nums)
        # l = next write position; nums[:l] is the kept prefix.
        i = l = 2
        while i < len(nums):
            # Strictly greater than the value two slots back means writing
            # nums[i] cannot create a third consecutive duplicate.
            if nums[i] > nums[l-2]:
                nums[l] = nums[i]
                l += 1
            i += 1
return l | [
"hhuang@pinterest.com"
] | hhuang@pinterest.com |
fb671f0b336b329f3ccd3d3bb5bd17cb48bb1a92 | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /Python算法指南/48_矩阵元素ZigZag返回_图形轨迹_难.py | c9d061da4948446bc195b407f14364f528fcccfc | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | class Solution:
    def printZMatrix(self, matrix):
        """Return all elements of *matrix* in zigzag (anti-diagonal) order.
        Assumes a non-empty rectangular matrix (``matrix[0]`` must exist).
        ``x`` indexes columns (0..xLen-1), ``y`` indexes rows (0..yLen-1).
        """
        x, y= 0, 0
        xLen = len(matrix[0])
        yLen = len(matrix)
        # direct == 1 walks up-right (col+1, row-1);
        # direct == 0 walks down-left (col-1, row+1).
        dx = [-1, 1]
        dy = [1, -1]
        # Seed with matrix[0][0] (x and y are both 0 here).
        ans = [matrix[x][y]]
        direct = 1
        for i in range(xLen * yLen - 1): # one element already appended, hence len-1
            nextX = x + dx[direct]
            nextY = y + dy[direct]
            if nextX >= 0 and nextX < xLen and nextY >= 0 and nextY < yLen:
                # Still inside the matrix: keep walking this diagonal.
                x = x + dx[direct]
                y = y + dy[direct]
                ans.append(matrix[y][x])
            else:
                # Hit an edge: step onto the next diagonal, reverse direction.
                if direct == 1:
                    if nextY < 0:
                        # Ran off the top edge: step right.
                        x = x + 1
                        ans.append(matrix[y][x])
                    else:
                        # Ran off the right edge: step down.
                        y = y + 1
                        ans.append(matrix[y][x])
                    direct = 0
                else:
                    if nextX < 0:
                        # Ran off the left edge: step down.
                        y = y + 1
                        ans.append(matrix[y][x])
                    else:
                        # Ran off the bottom edge: step right.
                        x = x + 1
                        ans.append(matrix[y][x])
                    direct = 1
        return ans
# Main entry point
if __name__ == "__main__":
    matrim = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    # Create the solver instance
    solution = Solution()
    print("输入的数组为:", matrim)
    print("ZigZag顺序返回矩阵的所有元素是:", solution.printZMatrix(matrim))
# class Solution:
# def printZMatrix(self, matrix):
# if len(matrix) == 0:
# return []
# x, y = 0, 0
# n, m = len(matrix), len(matrix[0])
# rows, cols = range(n), range(m)
# dx = [1, -1] #x的左右方向
# dy = [-1, 1] #y的上下方向
# direct = 1 # 1 = 左下方移动, 0 = 右上方移动,
# result = [] #轨迹上的点序
# for i in range(len(matrix) * len(matrix[0])): # 循环二维列表
# result.append(matrix[x][y])
# nextX = x + dx[direct] # 试探
# nextY = y + dy[direct]
# if nextX not in rows or nextY not in cols: # 当x和y都在集合中时, 不需要进入该判断条件
# if direct == 1: #左下方移动, 只有可能, 左边小于0, 下面超出范围.
# if nextY >= m: # m是最大行号
# nextX, nextY = x + 1, y # 向左下移动超过最大行, 则向右横向移动
# else:
# nextX, nextY = x, y + 1 # 否则, 向下移动
# else:
# if nextX >= n: # n是最大列号
# nextX, nextY = x, y + 1 # 向右上移动超过最大列, 则向下移动
# else:
# nextX, nextY = x + 1, y #否则, 向右移动
# direct = 1 - direct # 超出列表的最大行数或列数, 改变方向
# x, y = nextX, nextY
# return result | [
"1325338208@qq.com"
] | 1325338208@qq.com |
d043c6f4acf1b99b655fbe8953cdfe77b06d817f | 2c942aec676880cd1a80251005f6d8a3f6fe605a | /learning/sqlalchemy/firebird.py | a0b0e2028105727a853d16176d09ffe5206ae96b | [] | no_license | rsprenkels/python | 53ac1c89b5d60642f1a0c692a8bbac6b6d99d4c0 | 1e621ed146bdc52b735613c124a1bab36a36a39a | refs/heads/master | 2021-06-24T03:37:05.056202 | 2020-12-21T17:19:24 | 2020-12-21T17:19:24 | 151,983,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,113 | py | import logging
import time
from sqlalchemy import create_engine
import re
# need to have FDB python firebird driver installed: https://pypi.org/project/fdb/
# and some libraries: sudo apt-get install firebird3.0-common firebird3.0-utils firebird-dev firebird3.0-doc
# engine = create_engine('mysql+pymysql://etl:fN9GwzhXrYtcrj@dev-reportingdb001.m.int1-dus.dg-ao.de/dwsta')
#engine = create_engine('mysql+pymysql://jdbc:firebirdsql://10.100.211.55:3050/D:\\variobill\\production_dg\\data\\DG_VARIOBILL.FDB?lc_ctype=UTF8')
engine = create_engine('firebird+fdb://SYSDBA:Guiffez9@10.100.211.55:3050/D:\\variobill\\production_dg\\data\\DG_VARIOBILL.FDB')
query = """
SELECT
T.*,
-- lots of work is available, but throughput in the last minute less than <threshhold>
CASE WHEN T.num_old_notcompleted_orders >= 1 AND T.num_recently_finished < 5
THEN 1
ELSE 0
END AS Document_service_is_down
FROM (
SELECT (
SELECT
count(*) AS num_old_notcompleted_orders
FROM
DOCUMENT_ORDERS
WHERE
TS_WORK_FINISHED IS NULL -- order not completed
AND TS_ORDER_CREATED > dateadd( -10 DAY TO CAST('Now' AS DATE)) -- orders created in last 10 days
AND datediff(SECOND, TS_ORDER_CREATED, CAST('NOW' AS timestamp)) >= 60 -- orders older than 60 seconds
), (
SELECT
count(*) AS num_recently_finished
FROM
DOCUMENT_ORDERS
WHERE
TS_WORK_FINISHED IS NOT NULL -- order completed
AND TS_ORDER_CREATED > dateadd( -10 DAY TO CAST('Now' AS DATE)) -- orders created in last 10 days
AND datediff(SECOND, TS_WORK_FINISHED, CAST('NOW' AS timestamp)) < 30 -- finished within the last 30 seconds
)
FROM
RDB$DATABASE rd
) T
"""
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
log = logging.getLogger('queue_moni')
log.info("Starting ...")
# Poll the monitoring query forever, logging each result row as a dict
# roughly every 5 seconds.
# NOTE(review): the connection string above embeds credentials in source —
# consider moving them to the environment or a secrets store.
while True:
    connection = engine.connect()
    result = connection.execute(query)
    for row in result:
        res = {}
        # Pair column names with their values for readable log output.
        for k, v in zip(result.keys(), row):
            res[k] = v
        log.info(res)
    connection.close()
    time.sleep(5)
connection.close() | [
"ron.sprenkels@gmail.com"
] | ron.sprenkels@gmail.com |
d27f9f0db14090d79918969d17c5555b04d42283 | 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | /corehq/apps/app_manager/migrations/0014_create_exchangeapplication.py | e35359020468e6455be7cbabadb3bff0b2f9ed1d | [
"BSD-3-Clause"
] | permissive | dimagi/commcare-hq | a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | e7391ddae1af1dbf118211ecb52c83fc508aa656 | refs/heads/master | 2023-08-16T22:38:27.853437 | 2023-08-16T19:07:19 | 2023-08-16T19:07:19 | 247,278 | 499 | 203 | BSD-3-Clause | 2023-09-14T19:03:24 | 2009-07-09T17:00:07 | Python | UTF-8 | Python | false | false | 816 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-12 20:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ExchangeApplication model: a (domain, app_id) pair with
    the pair enforced unique together."""
    dependencies = [
        ('app_manager', '0013_rename_sqlglobalappconfig'),
    ]
    operations = [
        migrations.CreateModel(
            name='ExchangeApplication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=255)),
                ('app_id', models.CharField(max_length=255)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='exchangeapplication',
            unique_together=set([('domain', 'app_id')]),
        ),
    ]
| [
"orange.jenny@gmail.com"
] | orange.jenny@gmail.com |
8c8481a5aa61b668a41404a7d40c3abef74520ab | bb83b8e085d74dc2cd7e32e2688b103410309c3b | /tests/03-opencv-test-slow.py | 2d08a5edbbc370e24a3d02aa9a6f9720ddb69a58 | [] | no_license | praveen-palanisamy/duckietown-slimremote | 7fe21db47de9e68edd6bdf70b0f5e81ec6919fff | 3e4ec7e2995b82b618d20c580d44cf207dd47540 | refs/heads/master | 2020-03-27T08:09:43.455059 | 2018-08-21T18:34:33 | 2018-08-21T18:34:33 | 146,225,722 | 0 | 0 | null | 2018-08-26T23:58:55 | 2018-08-26T23:58:54 | null | UTF-8 | Python | false | false | 681 | py | import random
import time
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
# Dump the first 19 capture properties for debugging.
for i in range(19):
    print(i, cap.get(i))
# properties are listed here:
# https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
cap.set(3, 320)  # frame width
cap.set(4, 240)  # frame height
cap.set(5, 90) # framerate (90Hz is PiCam limit on old firmware)
tests = 10  # number of successfully grabbed frames before exiting
timings = 0  # NOTE(review): unused
i = 0
while (True):
    # Capture frame-by-frame
    start = time.time()  # NOTE(review): 'start' is never read afterwards
    ret, frame = cap.read()
    if not ret:
        print("something wrong")
    else:
        print("got frame")
        i += 1
        if i == tests:
            break
    # Random 0-3 s delay between reads — presumably to exercise slow,
    # irregular polling of the camera.
    time.sleep(random.random()*3)
cap.release()
| [
"fgolemo@gmail.com"
] | fgolemo@gmail.com |
6c18ec2a9320cc3a969de22f2e5ffaa73575cebe | 69a327a2af65d7252b624fe7cadd537eb51ca6d6 | /Greedy/BOJ_12915.py | 6988d3195bc8af0fb54e3b0d0475f6eb2c6756c1 | [] | no_license | enriver/algorithm_python | 45b742bd17c6a2991ac8095d13272ec4f88d9bf5 | 77897f2bf0241756ba6fd07c052424a6f4991090 | refs/heads/master | 2023-09-03T23:28:23.975609 | 2021-10-29T09:25:32 | 2021-10-29T09:25:32 | 278,907,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # 대회 개최 - S2
import sys
if __name__=="__main__":
    # E / M / H: pools usable for slot 0/1/2 only; EM and MH are flexible
    # pools usable for slots 0-or-1 and 1-or-2 respectively (presumably
    # easy/medium/hard problems — from the problem statement).
    E,EM,M,MH,H=map(int,sys.stdin.readline().split())
    count=0
    while True:
        # One iteration per round; check[i] records whether slot i was filled.
        check=[False]*3
        if E>0:
            E-=1
            check[0]=True
        else:
            # Slot 0 fallback: consume from the flexible EM pool.
            if EM>0:
                EM-=1
                check[0]=True
            else:
                break
        if M>0:
            M-=1
            check[1]=True
        else:
            # Slot 1 fallback: take from the larger flexible pool —
            # presumably to preserve the scarcer pool for later rounds.
            if EM>0 and MH>0:
                if EM>=MH:
                    EM-=1
                else:
                    MH-=1
            elif EM==0 and MH>0:
                MH-=1
            elif EM>0 and MH==0:
                EM-=1
            else:
                break
            check[1]=True
        if H>0:
            H-=1
            check[2]=True
        else:
            # Slot 2 fallback: consume from the flexible MH pool.
            if MH>0:
                MH-=1
                check[2]=True
            else:
                break
        # A round only counts when all three slots were filled.
        if False in check:
            break
        count+=1
print(count) | [
"riverkeh@naver.com"
] | riverkeh@naver.com |
10d080ce6227bf8d9ed60804bfc7694c2aab1388 | af8f0d50bb11279c9ff0b81fae97f754df98c350 | /src/book/api/serializers/bookrent.py | 6150b5b433197328d3a8ea1364681b88f89caa6d | [
"Apache-2.0"
] | permissive | DmytroKaminskiy/ltt | 592ed061efe3cae169a4e01f21d2e112e58714a1 | d08df4d102e678651cd42928e2343733c3308d71 | refs/heads/master | 2022-12-18T09:56:36.077545 | 2020-09-20T15:57:35 | 2020-09-20T15:57:35 | 292,520,616 | 0 | 0 | Apache-2.0 | 2020-09-20T15:49:58 | 2020-09-03T09:09:26 | HTML | UTF-8 | Python | false | false | 719 | py | from book.models import BookRent
from rest_framework import serializers
class BookRentSerializer(serializers.ModelSerializer):
class Meta:
model = BookRent
fields = (
'id',
'price', 'price_period', 'days_period',
'user_id', 'book', 'created', 'end',
'status', 'days_period_initial',
)
extra_kwargs = {
'price': {'read_only': True},
'price_period': {'read_only': True},
'days_period': {'read_only': True},
'book': {'required': True},
'created': {'read_only': True},
'end': {'read_only': True},
'days_period_initial': {'read_only': True},
}
| [
"dmytro.kaminskyi92@gmail.com"
] | dmytro.kaminskyi92@gmail.com |
2d07f87480d88e8e632428cfd92799b42dc34c4e | c531778b6b568e5924fcf438dce274067b6e1d31 | /resources/lib/common/fileops.py | 9747eb0db8fdded067bf1e3ca6134db8c778c7c0 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | CastagnaIT/plugin.video.netflix | a5180fbbaea244a490f750a2dd417b4e7303321a | ece10d24449faaccd7d65a4093c6b5679ee0b383 | refs/heads/master | 2023-07-01T23:32:20.442923 | 2023-06-27T06:42:18 | 2023-06-27T06:42:18 | 164,314,803 | 2,019 | 456 | MIT | 2023-09-13T13:34:06 | 2019-01-06T14:27:56 | Python | UTF-8 | Python | false | false | 5,675 | py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Helper functions for file operations
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import os
import xml.etree.ElementTree as ET
import xbmc
import xbmcvfs
from resources.lib.globals import G
from .misc_utils import build_url
def check_folder_path(path):
    """Return *path* with a trailing delimiter appended when it is missing.

    The delimiter kind ('/' or '\\') is inferred from the delimiters already
    present in *path*; this keeps xbmcvfs.exists working correctly on
    folder paths.
    """
    suffix = ''
    for delimiter in ('/', '\\'):
        if delimiter in path and not path.endswith(delimiter):
            suffix = delimiter
    return path + suffix
def folder_exists(path):
    """Return True when the folder at *path* exists.

    :param path: The folder path
    :return: True if it exists
    """
    normalized_path = check_folder_path(path)
    return xbmcvfs.exists(normalized_path)
def create_folder(path):
    """Create the folder at *path* unless it already exists.

    :param path: The folder path
    """
    if folder_exists(path):
        return
    xbmcvfs.mkdirs(path)
def file_exists(file_path):
    """Return True when the file at *file_path* exists.

    :param file_path: File path to check
    :return: True if it exists
    """
    translated_path = xbmcvfs.translatePath(file_path)
    return xbmcvfs.exists(translated_path)
def copy_file(from_path, to_path):
    """
    Copy a file to destination
    :param from_path: File path to copy
    :param to_path: Destination file path
    :return: True if copied
    """
    # The original try/finally-with-pass wrapper was a no-op (it neither
    # suppressed nor handled errors) and has been removed.
    return xbmcvfs.copy(xbmcvfs.translatePath(from_path),
                        xbmcvfs.translatePath(to_path))
def save_file_def(filename, content, mode='wb'):
    """Save *content* under *filename* in the default add-on data folder.

    :param filename: The filename
    :param content: The content of the file
    :param mode: optional file-open mode
    """
    target_path = os.path.join(G.DATA_PATH, filename)
    save_file(target_path, content, mode)
def save_file(file_path, content, mode='wb'):
    """Write *content* (converted to a bytearray) to *file_path*.

    :param file_path: The filename path
    :param content: The content of the file
    :param mode: optional file-open mode
    """
    translated_path = xbmcvfs.translatePath(file_path)
    with xbmcvfs.File(translated_path, mode) as file_handle:
        file_handle.write(bytearray(content))
def load_file_def(filename, mode='rb'):
    """Load *filename* from the default add-on data folder.

    :param filename: The file to load
    :param mode: optional file-open mode
    :return: The content of the file
    """
    full_path = os.path.join(G.DATA_PATH, filename)
    return load_file(full_path, mode)
def load_file(file_path, mode='rb'):
    """Read *file_path* and return its content decoded as UTF-8.

    :param file_path: The file path to load
    :param mode: optional file-open mode
    :return: The content of the file
    """
    translated_path = xbmcvfs.translatePath(file_path)
    with xbmcvfs.File(translated_path, mode) as file_handle:
        return file_handle.readBytes().decode('utf-8')
def delete_file_safe(file_path):
    """Delete *file_path* when it exists; a missing file is not an error."""
    # The original try/finally-with-pass wrapper was a no-op and was removed.
    # NOTE(review): if the intent was to swallow delete errors, it should
    # have been try/except — the finally version never suppressed anything.
    if xbmcvfs.exists(file_path):
        xbmcvfs.delete(file_path)
def delete_file(filename):
    """Delete *filename* from the default add-on data folder."""
    file_path = xbmcvfs.translatePath(os.path.join(G.DATA_PATH, filename))
    # The original try/finally-with-pass wrapper was a no-op and was removed.
    xbmcvfs.delete(file_path)
def list_dir(path):
    """
    List the contents of a folder
    :param path: The folder path to enumerate
    :return: The contents of the folder as tuple (directories, files)
    """
    return xbmcvfs.listdir(path)
def delete_folder_contents(path, delete_subfolders=False):
    """
    Delete all files in a folder
    :param path: Path to perform delete contents
    :param delete_subfolders: If True delete also all subfolders
    """
    directories, files = list_dir(xbmcvfs.translatePath(path))
    for filename in files:
        xbmcvfs.delete(os.path.join(path, filename))
    if not delete_subfolders:
        return
    # Recurse into each subfolder, then remove the (now empty) subfolder.
    for directory in directories:
        delete_folder_contents(os.path.join(path, directory), True)
        # Give time because the system performs previous op. otherwise it can't delete the folder
        xbmc.sleep(80)
        xbmcvfs.rmdir(os.path.join(path, directory))
def delete_folder(path):
    """Delete the folder at *path* together with all of its contents."""
    delete_folder_contents(path, delete_subfolders=True)
    # Brief pause so pending deletions finish before removing the folder.
    xbmc.sleep(80)
    xbmcvfs.rmdir(xbmcvfs.translatePath(path))
def write_strm_file(videoid, file_path):
    """Write a playable URL for *videoid* to a STRM file at *file_path*."""
    # Context manager guarantees close; consistent with save_file above.
    with xbmcvfs.File(xbmcvfs.translatePath(file_path), 'wb') as filehandle:
        filehandle.write(bytearray(build_url(videoid=videoid,
                                             mode=G.MODE_PLAY_STRM).encode('utf-8')))
def write_nfo_file(nfo_data, file_path):
    """Write *nfo_data* (an ElementTree element) as an NFO/XML file."""
    # Context manager guarantees close; consistent with save_file above.
    with xbmcvfs.File(xbmcvfs.translatePath(file_path), 'wb') as filehandle:
        filehandle.write(bytearray('<?xml version=\'1.0\' encoding=\'UTF-8\'?>'.encode('utf-8')))
        filehandle.write(bytearray(ET.tostring(nfo_data, encoding='utf-8', method='xml')))
def join_folders_paths(*args):
    """Join multiple path fragments in a Kodi-safe way."""
    # os.path.join is avoided on purpose: with special chars like % it can
    # break the resulting path in some cases.
    joined = '/'.join(args)
    return xbmcvfs.makeLegalFilename(joined)
def get_xml_nodes_text(nodelist):
    """Concatenate and return the data of all text nodes in *nodelist*."""
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
| [
"gottardo.stefano.83@gmail.com"
] | gottardo.stefano.83@gmail.com |
788e6db343a9cecdc688f28d7679566f6d75ae21 | db575f3401a5e25494e30d98ec915158dd7e529b | /BIO_Stocks/ACHV.py | 54d786c8ecbb6e82a8972b7e44fa20ff5367c587 | [] | no_license | andisc/StockWebScraping | b10453295b4b16f065064db6a1e3bbcba0d62bad | 41db75e941cfccaa7043a53b0e23ba6e5daa958a | refs/heads/main | 2023-08-08T01:33:33.495541 | 2023-07-22T21:41:08 | 2023-07-22T21:41:08 | 355,332,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control):
try:
url = 'http://ir.achievelifesciences.com/news-releases'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
result = requests.get(url, headers=headers)
#print(result.content.decode())
html_content = result.content.decode()
soup = BeautifulSoup(html_content, 'html.parser')
#print(soup)
articles = soup.findAll('li', attrs={'class':'wd_item'})
# get first article
FIRST_ARTICLE = articles[0]
article_date = FIRST_ARTICLE.find('div', attrs={'class':'wd_date'})
article_desc = FIRST_ARTICLE.find('div', attrs={'class':'wd_title'})
v_article_date = article_date.text.lstrip().rstrip()
#if the process find any article with the today date
istoday, v_art_date = validateday(v_article_date)
if (istoday == True):
v_ticker = os.path.basename(__file__).replace(".py", "")
v_url = article_desc.a.get('href')
v_description = article_desc.text.lstrip().rstrip()
now = datetime.now()
print("URL: " + v_url)
print("DESCRIPTION: " + v_description)
print("ARTICLE_DATE: " + str(now))
# Insert articles
if "https://" in v_url:
InsertData(v_ticker, v_description, v_url, v_art_date)
else:
InsertData(v_ticker, v_description, url, v_art_date)
except Exception:
error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
print(error_message)
Insert_Logging(id_control, 'Detail', error_message)
pass
#InsertData()
if __name__ == "__main__":
main()
| [
"andisc_3@hotmail.com"
] | andisc_3@hotmail.com |
18a8fc714c96568c2445ca1875ce8a2ac25eaa47 | 2612f336d667a087823234daf946f09b40d8ca3d | /python/helpers/typeshed/stdlib/3.5/zipapp.pyi | 9fac5a026afff523bc4f6860c2b92dce62e420c9 | [
"MIT",
"Apache-2.0"
] | permissive | tnorbye/intellij-community | df7f181861fc5c551c02c73df3b00b70ab2dd589 | f01cf262fc196bf4dbb99e20cd937dee3705a7b6 | refs/heads/master | 2021-04-06T06:57:57.974599 | 2018-03-13T17:37:00 | 2018-03-13T17:37:00 | 125,079,130 | 2 | 0 | Apache-2.0 | 2018-03-13T16:09:41 | 2018-03-13T16:09:41 | null | UTF-8 | Python | false | false | 363 | pyi | # Stubs for zipapp (Python 3.5+)
from pathlib import Path
from typing import BinaryIO, Optional, Union
_Path = Union[str, Path, BinaryIO]
class ZipAppError(Exception): ...
def create_archive(source: _Path, target: Optional[_Path] = ..., interpreter: Optional[str] = ..., main: Optional[str] = ...) -> None: ...
def get_interpreter(archive: _Path) -> str: ...
| [
"andrey.vlasovskikh@jetbrains.com"
] | andrey.vlasovskikh@jetbrains.com |
873c78875fe1bf5a9557aea8427a692b110ac7a0 | e7a56f1f086352a45947a7ab3cecd71828d21f50 | /tovp/promotions/migrations/0005_auto_20150302_1323.py | f64b2f62600beeece25c1687f6fc8be5aa6f4fe1 | [
"MIT"
] | permissive | nrsimha/tovp | af2df2967a47e43c5378dc52c99652e8242c429b | 311bc957c95c294811d737f5df30b0a218d35610 | refs/heads/master | 2023-05-26T05:50:52.405855 | 2017-05-10T13:40:59 | 2017-05-10T13:40:59 | 27,473,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('promotions', '0004_auto_20150302_1036'),
]
operations = [
migrations.AddField(
model_name='goldenbrick',
name='brick_status',
field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
preserve_default=True,
),
migrations.AddField(
model_name='guruparamparabrick',
name='brick_status',
field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
preserve_default=True,
),
migrations.AddField(
model_name='nrsimhatile',
name='brick_status',
field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
preserve_default=True,
),
migrations.AddField(
model_name='radhamadhavabrick',
name='brick_status',
field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
preserve_default=True,
),
]
| [
"pnd@mayapurmedia.com"
] | pnd@mayapurmedia.com |
354524169b42cadba1eed445e88ac79ae684b416 | 9ffabcaef668b1c0ec8f9451e2d02b472ca6c61d | /compute/compute_provisioner/compute_provisioner/allocator.py | 684862cf43b20da1cdbcc0dc1c61183335c2d3a6 | [] | no_license | ESGF/esgf-compute-wps | 704ee5940e9cbc7d12ef41e0a724202c69fffc67 | 82fb3e79f8e43367fa31a6dba7127da4b744a944 | refs/heads/devel | 2021-07-15T06:52:30.731608 | 2021-03-11T01:28:40 | 2021-03-11T01:28:40 | 29,711,838 | 9 | 6 | null | 2020-02-12T18:12:47 | 2015-01-23T02:09:58 | Python | UTF-8 | Python | false | false | 3,829 | py | import logging
import yaml
from jinja2 import DebugUndefined
from jinja2 import Template
from kubernetes import client
from kubernetes import config
logger = logging.getLogger(__name__)
class KubernetesAllocator(object):
def __init__(self):
config.load_incluster_config()
self.core = client.CoreV1Api()
self.apps = client.AppsV1Api()
self.extensions = client.ExtensionsV1beta1Api()
def create_pod(self, namespace, body, **kwargs):
return self.core.create_namespaced_pod(namespace, body, **kwargs)
def list_pods(self, namespace, label_selector, **kwargs):
return self.core.list_namespaced_pod(namespace, label_selector=label_selector, **kwargs)
def create_deployment(self, namespace, body, **kwargs):
return self.apps.create_namespaced_deployment(namespace, body, **kwargs)
def create_service(self, namespace, body, **kwargs):
return self.core.create_namespaced_service(namespace, body, **kwargs)
def create_ingress(self, namespace, body, **kwargs):
return self.extensions.create_namespaced_ingress(namespace, body, **kwargs)
def create_config_map(self, namespace, body, **kwargs):
return self.core.create_namespaced_config_map(namespace, body, **kwargs)
def delete_resources(self, namespace, label_selector, **kwargs):
api_mapping = {
'pod': self.core,
'deployment': self.apps,
'service': self.core,
'ingress': self.extensions,
'config_map': self.core,
}
for name, api in api_mapping.items():
list_name = f'list_namespaced_{name!s}'
delete_name = f'delete_namespaced_{name!s}'
output = getattr(api, list_name)(namespace, label_selector=label_selector, **kwargs)
logger.info(f'Removing {len(output.items)!r} {name!s}')
for x in output.items:
getattr(api, delete_name)(x.metadata.name, namespace, **kwargs)
def create_resources(self, request, namespace, labels, service_account_name, image_pull_secret, **kwargs):
for item in request:
template = Template(item, undefined=DebugUndefined)
config = {
'image_pull_secret': image_pull_secret,
'labels': [f'{x}: {y}' for x, y in labels.items()],
}
rendered_item = template.render(**config)
yaml_data = yaml.safe_load(rendered_item)
try:
yaml_data['metadata']['labels'].update(labels)
except KeyError:
yaml_data['metadata'].update({
'labels': labels
})
kind = yaml_data['kind']
logger.info(f'Allocating {kind!r} with labels {yaml_data["metadata"]["labels"]!r}')
if kind == 'Pod':
yaml_data['spec']['serviceAccountName'] = service_account_name
yaml_data['spec']['imagePullSecrets'] = [
{'name': image_pull_secret},
]
self.create_pod(namespace, yaml_data)
elif kind == 'Deployment':
yaml_data['spec']['template']['spec']['serviceAccountName'] = service_account_name
yaml_data['spec']['template']['spec']['imagePullSecrets'] = [
{'name': image_pull_secret},
]
self.create_deployment(namespace, yaml_data)
elif kind == 'Service':
self.create_service(namespace, yaml_data)
elif kind == 'Ingress':
self.create_ingress(namespace, yaml_data)
elif kind == 'ConfigMap':
self.create_config_map(namespace, yaml_data)
else:
raise Exception('Requested an unsupported resource')
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
670730c7a29ef8f6c8e267f16949943f8b56d7af | d9f3fd0661bcf13416eb0d3d7bfbc545706af3e0 | /dev_bot.py | f6d2f13d63f3404838ddeda7016d2663ba7ab7c4 | [
"MIT"
] | permissive | jayrav13/njit-events-api | a26edffd145f4820e53933f4555d8b4a4ca601fc | 0027c0741601170d4806e45dbf4c08eecfb2cecc | refs/heads/master | 2021-01-10T08:59:38.574026 | 2016-01-30T23:33:56 | 2016-01-30T23:33:56 | 44,531,107 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,984 | py | # By Jay Ravaliya
# Imports
from twython import Twython
from secret import consumer_key, consumer_secret, access_token, access_secret
from model import Posted, db
import requests
import json
import datetime
import random
import math
import sys
# Set up Twitter keys
twitter = Twython(consumer_key, consumer_secret, access_token, access_secret)
# Set up payload for analytics.
payload = {
"userid" : "TwitterBot",
"device" : "TwitterBot"
}
# Send post request to API to get data, convert it to JSON right away.
r = requests.post("http://eventsatnjit.jayravaliya.com/api/v0.2/events", json=payload).json()
# Retrive current time.
currenttime = datetime.datetime.now()
# Total number of events, counted.
total = 0
# At 8:00 AM, post morning tweet.
if(currenttime.hour == 8):
# Count total elements that are taking place today. Post it.
# Else, post that there are no events going on.
for elem in r["response"]:
if elem["datetime"]["is_today"] == True:
total = total + 1
if total > 0:
tweet = "There are " + str(total) + " events taking place today! Be sure to stop by and check some out! via @EventsAtNJIT"
else:
tweet = "Ah - no events going on today! Be sure to check back tomorrow to see what's going on!"
print(tweet)
twitter.update_status(status=tweet)
# If posting at night, post # of events going on tomorrow.
elif(currenttime.hour == 22):
tweet = "That's all for today! Visit back tomorrow to learn about the awesome events taking place on campus! via @EventsAtNJIT"
twitter.update_status(status=tweet)
# Posting every two hours:
else:
# Starting text.
starters = [
"Awesome event coming up: ",
"Did you know? ",
"Check this out: ",
"Stop by: "
]
# Categories to include.
categories = [
"Intramurals & Recreation",
"Reception, Banquet, Party",
"Lecture, Seminar, Workshop",
"Conference, Fair",
"Other"
]
# Count the number of events. Exit if there are no events left.
num_events = 0
def today_events():
global num_events
for elem in r["response"]:
if (elem["datetime"]["is_today"] == True or elem["datetime"]["is_tomorrow"]):
num_events = num_events + 1
today_events()
if (num_events == 0):
print "NO EVENTS"
sys.exit()
# Input JSON element - ouput validity.
def valid_event(elem):
if (elem["datetime"]["is_today"] == True or elem["datetime"]["is_tomorrow"] == True):
if (elem["datetime"]["multiday"] == False and (elem["datetime"]["currently_happening"] == False or elem["datetime"]["starting_now"] == True)):
return True
return False
# Input JSON element - output tweet.
def generate_tweet(elem):
print("Element Id: " + str(elem["id"]))
# Random intro, unless happening now.
if elem["datetime"]["currently_happening"] == True:
intro = "Happening Now: "
else:
intro = starters[int(math.floor(random.random() * len(starters)))]
# Add basic data.
tweet = "\"" + elem["name"] + "\"" + " hosted by " + elem["organization"] + " "
if elem["datetime"]["is_today"] == True:
tweet = tweet + "starts today "
elif elem["datetime"]["is_tomorrow"] == True:
tweet = tweet + "starts tomorrow "
elif elem["datetime"]["currently_happening"] == True:
tweet = tweet + "started "
else:
tweet = tweet + "starts on " + elem["datetime"]["start"]["common_formats"]["date"] + " "
# Finalize tweet, return.
tweet = tweet + "at " + elem["datetime"]["start"]["common_formats"]["time"] + " in " + elem["location"] + "."
if len(intro + tweet) <= 140:
return intro + tweet
elif len(tweet) <= 140:
return tweet
else:
return None
# Loop through events, tweet!
for elem in r["response"]:
if valid_event(elem) == True:
try:
tweet = generate_tweet(elem)
p = Posted.query.filter_by(event_id=elem["id"]).first()
if tweet != None and p == None:
print tweet + " / " + str(len(tweet))
p = Posted(elem["id"])
db.session.add(p)
db.session.commit()
twitter.update_status(status=tweet)
break
except:
pass
| [
"jayrav13@gmail.com"
] | jayrav13@gmail.com |
cd157bf03647a3a30c7fc5d919c7b066c4747813 | 43f9cfd3761171ab59742d7a5b768b73e81eb973 | /lang/femtocode/thirdparty/meta/asttools/visitors/copy_tree.py | 09fe4891fcb2028d07ab600999f95c448f4c1035 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | diana-hep/femtocode | 2980c4d39f941506d345651ee56ddb00a63320d5 | bfde538a99f35345eec8b5a0db670f29f83e1cc5 | refs/heads/master | 2021-01-19T12:13:04.303112 | 2017-08-15T05:42:49 | 2017-08-15T05:42:49 | 69,881,392 | 26 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | '''
Created on Dec 12, 2011
@author: sean
'''
from . import Visitor
import ast
#FIXME: add tests
class CopyVisitor(Visitor):
'''
Copy only ast nodes and lists
'''
def visitDefault(self, node):
Node = type(node)
new_node = Node()
for _field in Node._fields:
if hasattr(node, _field):
field = getattr(node, _field)
if isinstance(field, (list, tuple)):
new_list = []
for item in field:
if isinstance(item, ast.AST):
new_item = self.visit(item)
else:
new_item = item
new_list.append(new_item)
setattr(new_node, _field, new_list)
elif isinstance(field, ast.AST):
setattr(new_node, _field, self.visit(field))
else:
setattr(new_node, _field, field)
for _attr in node._attributes:
if hasattr(node, _attr):
setattr(new_node, _attr, getattr(node, _attr))
return new_node
def copy_node(node):
return CopyVisitor().visit(node)
| [
"jpivarski@gmail.com"
] | jpivarski@gmail.com |
a327de873746d6c5b6eedee78c3955284df9f7b7 | 14913a0fb7e1d17318a55a12f5a181dddad3c328 | /07.garosero1.py | 55135979754b8aa2ae2388e221ad3cecc621ca44 | [] | no_license | Jesuisjavert/Algorithm | 6571836ec23ac3036565738c2bee94f416595f22 | 730549d19e66e20b3474a235a600958a8e036a0e | refs/heads/master | 2023-02-16T06:34:50.984529 | 2020-09-25T09:40:30 | 2020-09-25T09:40:30 | 330,849,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | for test_case in range(0, 10):
T = int(input())
N = 100
arr = [list(map(int, input().split())) for i in range(N)]
MAX = 0
eorkrtjs1 = eorkrtjs2 = 0
garo = 0
sero = 0
for i in range(N):
eorkrtjs1 += arr[i][i]
eorkrtjs2 += arr[i][99-i]
for j in range(N):
garo += arr[i][j]
sero += arr[j][i]
MAX = max(garo, sero, eorkrtjs1, eorkrtjs2)
print(f'{T},{Max}') | [
"jesuisjavert@gmail.com"
] | jesuisjavert@gmail.com |
08fb7d4a8b60f9656534c8c19aa38ab3d8a6a448 | abeec076f89231c4dd589e84def8301e653d6e20 | /orders/migrations/0009_remove_order_cook.py | f119d17a90f1126e4fe5228f032a2bb623ef00a1 | [] | no_license | gibil5/pcm_restaurant | 1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a | a56ec01c533ed2b6e198de9813f9518a3eca2d14 | refs/heads/master | 2020-08-29T20:10:13.606229 | 2019-12-01T19:48:47 | 2019-12-01T19:48:47 | 218,160,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # Generated by Django 2.2.6 on 2019-11-15 20:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0008_auto_20191115_1359'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='cook',
),
]
| [
"jrevilla55@gmail.com"
] | jrevilla55@gmail.com |
a6eddbcfb2a9ca0bb96202a98f76663c6b28ed92 | 88de1855cddc294bf7e23e000738b97e2ce8fe5d | /peek_core_user/server/UserImportApiABC.py | 93c4a9b1cb214e31ccc4ded908c89d4457828b92 | [] | no_license | Synerty/peek-core-user | cea121a5bc37552055eff7d9c25e621531435631 | 89c9b782a9f5c7ae042a1498062c30cc07efa8c8 | refs/heads/master | 2020-03-18T17:07:18.765974 | 2020-02-24T03:32:40 | 2020-02-24T03:32:40 | 135,007,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from abc import ABCMeta, abstractmethod
from twisted.internet.defer import Deferred
class UserImportApiABC(metaclass=ABCMeta):
@abstractmethod
def importInternalUsers(self, importHash: str, usersEncodedPayload: bytes) -> Deferred:
""" Import Internal Users
Add, replace and remove users in the internal DB
:param importHash: A string representing this group of items to import
:param usersEncodedPayload: A List[ImportInternalUserTuple] to import,
wrapped in a serialised payload.
Wrap the disps list with ::
dispsVortexMsg = Payload(tuples=users).toVortexMsg()
Calling this method with no tuples will delete all items with this importHash
:return: A deferred that fires when the users are loaded
"""
@abstractmethod
def importInternalGroups(self, importHash: str, groupsEncodedPayload: bytes) -> Deferred:
""" Import Internal Groups
Add, replace and remove users in the internal DB
:param importHash: A string representing this group of items to import
:param groupsEncodedPayload: A List[ImportInternalGroupTuple] to import,
wrapped in a serialised payload.
Wrap the disps list with ::
dispsVortexMsg = Payload(tuples=groups).toVortexMsg()
Calling this method with no tuples will delete all items with this importHash
:return: A deferred that fires when the groups are loaded
"""
| [
"jarrod.chesney@synerty.com"
] | jarrod.chesney@synerty.com |
80b4417afbb29d716ffbdac552a7a325410ed080 | 25c531d2acc0218cc8fc3e275db4c2042dbc3a96 | /exam2/min_max_valid.py | 71429f561fd5401c5e2b1dd5c3842876b9877c95 | [] | no_license | anaswara-97/python_project | 230242287886479ec134cb48cdfbacb70e9c9228 | efd0156d0c67b9686f52638b8b3264eb6bdef23d | refs/heads/master | 2023-08-16T16:16:11.063927 | 2021-09-20T14:24:50 | 2021-09-20T14:24:50 | 402,699,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py |
import re
x=input("enter string: ")
n='[A-Z]\w[a-zA-Z0-9][A-Z]{5,10}$'
m=re.fullmatch(n,x)
if m is not None:
print("valid")
else:
print("invalid") | [
"warrior123@gmail.com"
] | warrior123@gmail.com |
dc5c0a4ef33f5f4687f9d3e60f82b0952c5ad268 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/managednetworkfabric/azure-mgmt-managednetworkfabric/generated_samples/internet_gateways_list_by_subscription_maximum_set_gen.py | 17d2cd72ae957d32e3c0592b68efc2a6404ab9b0 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,648 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.managednetworkfabric import ManagedNetworkFabricMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-managednetworkfabric
# USAGE
python internet_gateways_list_by_subscription_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ManagedNetworkFabricMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="1234ABCD-0A1B-1234-5678-123456ABCDEF",
)
response = client.internet_gateways.list_by_subscription()
for item in response:
print(item)
# x-ms-original-file: specification/managednetworkfabric/resource-manager/Microsoft.ManagedNetworkFabric/stable/2023-06-15/examples/InternetGateways_ListBySubscription_MaximumSet_Gen.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
a99189c5c97d330b6b9d22b893860bde4226486a | 5d2f4c05ba0ac80370ed1d03dc1fde6c2a6d53b3 | /common/urls.py | 23e6e24078ba7d95f9d4f7bf99fe1e7411558010 | [] | no_license | yindashan/dwz | af7f1d2a0ca5edad0beac15a71861701c7f0c8ab | eca18a91d882facae93e7d3ec66f4c943f9eba32 | refs/heads/master | 2020-04-18T20:07:31.519664 | 2014-10-15T02:31:28 | 2014-10-15T02:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #!usr/bin/env python
#coding: utf-8
from django.conf.urls import patterns
from django.conf.urls import url
urlpatterns = patterns('',
url(r'^$', 'common.views.index',name="index"),
url(r'^index/$', 'common.views.index',name="common_index"),
url(r'^login/$', 'common.views.login', name="common_login"),
url(r'^logout/$', 'common.views.logout', name="common_logout"),
url(r'^success/$', 'common.views.success',name="common_success"),
url(r'^nav_index/$', 'common.views.nav_index', name="common_index"),
url(r'^nav_resource/$', 'common.views.nav_resource', name="common_resource"),
url(r'^nav_log/$', 'common.views.nav_log', name="common_log"),
url(r'^nav_ippool/$', 'common.views.nav_ippool', name="common_ippool"),
url(r'^nav_user/$', 'common.views.nav_user', name="common_user"),
url(r'^nav_authority/$', 'common.views.nav_authority', name="common_authority"),
url(r'^main/$', 'common.views.main', name="common_main"),
) | [
"="
] | = |
d5ff5fb556f4adb297f78cc050836eea87ea143c | 9868f287cfa54a8ed6c67b91b59d4f09bbd9410c | /large_language_model/paxml/utils/select_text.py | 30115f754f6bce47da31347bddc489f62936f902 | [
"Apache-2.0"
] | permissive | mlcommons/training | 41c7e21ea074b5f5bb040d3602e621c3e987cc0e | 2f4a93fb4888180755a8ef55f4b977ef8f60a89e | refs/heads/master | 2023-09-05T12:45:53.020925 | 2023-08-03T15:43:54 | 2023-08-03T15:43:54 | 127,351,529 | 431 | 162 | Apache-2.0 | 2023-09-07T23:35:53 | 2018-03-29T21:56:06 | Python | UTF-8 | Python | false | false | 2,240 | py | """Script to randomly pick certain number of text from C4 dataset.
"""
import argparse
import time
import tensorflow as tf
import tensorflow_datasets as tfds
parser = argparse.ArgumentParser(
description="Randomly pick text from C4 dataset.")
parser.add_argument(
"--data_dir",
type=str,
default="",
help="Path to tfds directory, which contains C4/../x.y.z.")
parser.add_argument(
"--language",
type=str,
default="en",
help="Language of dataset.")
parser.add_argument(
"--version",
type=str,
default="3.0.1",
help="Version of dataset.")
parser.add_argument(
"--split",
type=str,
default="train",
help="Split of dataset.")
parser.add_argument(
"--num_examples",
type=int,
default=40000000,
help="Number of examples to pick from dataset.")
parser.add_argument(
"--output_text_file",
type=str,
default="",
help="Path for output text file.")
args = parser.parse_args()
if __name__ == '__main__':
tic = time.time()
ds_name = "c4/" + args.language + ":" + args.version
ds = tfds.load(
ds_name,
split=args.split,
shuffle_files=True,
data_dir=args.data_dir)
num_examples = 0
max_text_length = 0
total_text_length = 0
num_lines = 0
max_line_length = 0
total_line_length = 0
fout = open(args.output_text_file, "wb")
for example in ds:
text = example["text"].numpy()
length = len(text)
if length > max_text_length:
max_text_length = length
total_text_length += length
fout.write(text)
fout.write(b"\n\n")
num_examples += 1
if (num_examples % 10000) == 0:
print(num_examples)
lines = text.split(b"\n")
for line in lines:
line_length = len(line)
if line_length > max_line_length:
max_line_length = line_length
total_line_length += line_length
num_lines += 1
if num_examples >= args.num_examples:
break
fout.close()
print(
"num_examples = ", num_examples,
"max_length = ", max_text_length,
"avg_length = ", total_text_length / num_examples)
print(
"num_lines = ", num_lines,
"max_length = ", max_line_length,
"avg_length = ", total_line_length / num_lines)
| [
"noreply@github.com"
] | mlcommons.noreply@github.com |
88d8c278e5cb2bcc73aba96487f527d9802dcc1f | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/containerservice/v20200101/list_managed_cluster_admin_credentials.py | a4415be37a63f657ee0531031637b6bb9de397f8 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListManagedClusterAdminCredentialsResult',
'AwaitableListManagedClusterAdminCredentialsResult',
'list_managed_cluster_admin_credentials',
'list_managed_cluster_admin_credentials_output',
]
@pulumi.output_type
class ListManagedClusterAdminCredentialsResult:
"""
The list of credential result response.
"""
def __init__(__self__, kubeconfigs=None):
if kubeconfigs and not isinstance(kubeconfigs, list):
raise TypeError("Expected argument 'kubeconfigs' to be a list")
pulumi.set(__self__, "kubeconfigs", kubeconfigs)
@property
@pulumi.getter
def kubeconfigs(self) -> Sequence['outputs.CredentialResultResponse']:
"""
Base64-encoded Kubernetes configuration file.
"""
return pulumi.get(self, "kubeconfigs")
class AwaitableListManagedClusterAdminCredentialsResult(ListManagedClusterAdminCredentialsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListManagedClusterAdminCredentialsResult(
kubeconfigs=self.kubeconfigs)
def list_managed_cluster_admin_credentials(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAdminCredentialsResult:
"""
The list of credential result response.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerservice/v20200101:listManagedClusterAdminCredentials', __args__, opts=opts, typ=ListManagedClusterAdminCredentialsResult).value
return AwaitableListManagedClusterAdminCredentialsResult(
kubeconfigs=__ret__.kubeconfigs)
@_utilities.lift_output_func(list_managed_cluster_admin_credentials)
def list_managed_cluster_admin_credentials_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListManagedClusterAdminCredentialsResult]:
"""
The list of credential result response.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the managed cluster resource.
"""
...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
9e1bc8cffb5fd42b2baafbb9a70995f6cd284795 | 39329ae5773c9b4c1f9c91eec393507f5e8ae1c0 | /server/.history/server_20200307204844.py | c8fab0782eb750f449ebba0c2641d732d187e834 | [] | no_license | dobreandrei1/legal | 083267aae7faa10775e5a634679869fce0ac3136 | dd05fad8df599f9fc34f56628ebd8861f7a004b4 | refs/heads/master | 2021-09-08T20:16:29.926214 | 2020-03-08T09:24:04 | 2020-03-08T09:24:04 | 245,785,262 | 0 | 0 | null | 2021-09-03T00:42:33 | 2020-03-08T09:22:37 | Python | UTF-8 | Python | false | false | 1,681 | py | from pathlib import Path
from flask import Flask, render_template, request, send_file, send_from_directory, safe_join, abort, current_app
# from werkzeug import secure_filename
import pandas as pd
import os
import time
import json
from flask_cors import CORS
from haikunator import Haikunator
import unidecode
import PyPDF2
import unidecode
haikunator = Haikunator()
app = Flask(__name__)
CORS(app)
applicationVersion = 0
@app.route('/upload')
def upload_file():
return render_template('upload.html')
@app.route('/api/titles', methods = ['GET', 'POST'])
def get_titles():
if request.method == 'POST':
f = request.files['file']
filename = request.form['filename']
# TODO: maybe check if file alreay exists and not save multipletime
# - get list of all files
# - if filename variable is a substr of any file name in folder: compare their contents
# - if match don`t save file again but use that one
name = filename + '.pdf'
if Path(name).exists():
name = filename + '.pdf'
f.save(name)
pdfFileObject = open('clauze.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObject)
pages = pdfReader.numPages
pageObject = pdfReader.getPage(0)
print(unidecode.unidecode(pageObject.extractText()))
pdfFileObject1 = open('clauze.pdf', 'rb')
pdfReader1 = PyPDF2.PdfFileReader(pdfFileObject1)
pages1 = pdfReader1.numPages
pageObject1 = pdfReader1.getPage(0)
print(unidecode.unidecode(pageObject.extractText()))
return 1
if __name__ == '__main__':
app.run(debug = False, host='0.0.0.0')
| [
"dobreandrei1@yahoo.com"
] | dobreandrei1@yahoo.com |
4b4bc1e6daf1efdf2056529b8c8fc3498f56d5d1 | 304e75224229786ba64c6ef2124007c305019b23 | /src/easy/answer/decompress_run_length_encoded_list.py | aebe45e26dd4e6ae7aee6574b4b462cd68ecac45 | [] | no_license | Takuma-Ikeda/other-LeetCode | 9179a8100e07d56138fd3f3f626951195e285da2 | 499616d07011bee730b9967e9861e341e62d606d | refs/heads/master | 2023-04-14T06:09:35.341039 | 2023-04-10T02:29:18 | 2023-04-10T02:29:18 | 226,260,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from typing import List
class Solution:
def decompressRLElist(self, nums: List[int]) -> List[int]:
result = []
# 偶数番目
freq = nums[0::2]
# 奇数番目
val = nums[1::2]
for i in range(len(freq)):
while 0 != freq[i]:
result += [val[i]]
freq[i] -= 1
return result
# 模範解答
# https://leetcode.com/problems/decompress-run-length-encoded-list/discuss/478426/Python-3-(one-line)-(beats-100)
'''
class Solution:
def decompressRLElist(self, nums: List[int]) -> List[int]:
l, result = len(nums), []
# range ※ 引数 3 つバージョン
# 第一引数: start
# 題ニ引数: stop
# 第三引数: step (オプション) 数字を刻み方を指定できる
for i in range(0, l, 2):
# 繰り返し回数 * [値]
result.extend(nums[i] * [nums[i + 1]])
return result
'''
| [
"el.programdear@gmail.com"
] | el.programdear@gmail.com |
124b5269c241ba565b5083c8361be7607ad63332 | 6a33cb94d4af1d8a7329ddc6c9d42f870c35bb2f | /python/euler39.py | 2de1687b9949fd54f3d78119582b7a029ffaeed8 | [] | no_license | vochong/project-euler | 836321cc8e7d2e7cdf22b3b136d44dcba74a8701 | 6a0c7103861ff825bf84800b6e2e62819a41e36d | refs/heads/master | 2020-04-29T10:41:48.487159 | 2018-09-19T00:13:34 | 2018-09-19T00:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def euler39():
"""
If p is the perimeter of a right angle triangle with integral length
sides, {a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p <= 1000, is the number of solutions maximised?
"""
count = [0] * 1001
for a in range(1, 333):
for b in range(a+1, 500):
c = (a**2 + b**2) ** 0.5
p = a + b + int(c)
if int(c) != c: continue
if p > 1000: break
count[p] += 1
return count.index(max(count))
if __name__ == "__main__":
print euler39()
| [
"kueltz.anton@gmail.com"
] | kueltz.anton@gmail.com |
0fbc5efe97124e98fc31e9a8ac8fb8c69185eb8f | 98420fdd66b8dce46ef88cd34fcace36777fa232 | /obsolete/py/logreg/nonlinear_cg.py | d638ea1131e453f55add6479b98188360c900d80 | [] | no_license | Daiver/jff | f972fe7464f78ba6008a036b697ea3f04b7010a4 | 33d6a781af8d7f6ae60c25e10051977af2fef1b9 | refs/heads/master | 2023-04-07T06:33:41.487938 | 2022-05-03T10:07:32 | 2022-05-03T10:07:32 | 12,180,634 | 1 | 1 | null | 2023-04-03T19:25:00 | 2013-08-17T15:03:14 | C++ | UTF-8 | Python | false | false | 2,049 | py | import numpy as np
import linesearch
def nonLinCG(func, grad, initialX, nIter):
x = initialX
r0 = -grad(x)
d0 = r0
gradLen = r0.shape[0]
curSteps = 0
for iter in xrange(nIter):
curSteps += 1
#alpha = linesearch.quadLineSearchIter(
#5, 0.00001, 0.0001, x, d0, grad)
alpha = linesearch.quadLineSearch(0.000001, x, -r0, d0, grad)
x = x + alpha * d0
r1 = -grad(x)
beta = r1.dot(r1 - r0)/(r0.dot(r0))
if beta < 0:
beta = 0
elif curSteps > gradLen:
curSteps = 0
beta = 0
d0 = r1 + beta * d0
r0 = r1
err = func(x)
print iter, 'err', err
if err < 0.00001:
break
return x
def nonLinCGSeq(func, grad, initialX, nIter):
x = initialX
xs = [x]
errs = [func(x)]
r0 = -grad(x)
d0 = r0
gradLen = r0.shape[0]
curSteps = 0
for iter in xrange(nIter):
curSteps += 1
#alpha = linesearch.quadLineSearchIter(
#5, 0.00001, 0.0001, x, d0, grad)
alpha = linesearch.quadLineSearch(0.0001, x, -r0, d0, grad)
x = x + alpha * d0
r1 = -grad(x)
beta = r1.dot(r1 - r0)/(r0.dot(r0))
if beta < 0:
beta = 0
elif curSteps > gradLen:
curSteps = 0
beta = 0
d0 = r1 + beta * d0
r0 = r1
err = func(x)
xs.append(x)
errs.append(err)
print iter, 'err', err
if err < 0.00001:
break
return np.array(xs), np.array(errs)
if __name__ == '__main__':
from scipy.optimize import rosen, rosen_der, minimize
#import plot_rosen
# print nonLinCG(
#rosen,
#rosen_der,
#np.array([-0.1, -1.0]),
#50)
# xss, zs = nonLinCGSeq(
#rosen,
#rosen_der,
#np.array([-2.1, 1.0]),
#50)
#xs, ys = xss[:, 0], xss[:, 1]
#plot_rosen.plotRosenbrock([xs, ys, zs])
| [
"ra22341@ya.ru"
] | ra22341@ya.ru |
c49a3366869f240a88591a16171f85912531ef19 | b99b32fb0b4597bee94809ebd3b2ddae43064bee | /landmark_detection/menpofit/clm/__init__.py | 4fec6db002ccf5d5441b4403563a053bda94ebe0 | [] | no_license | HongwenZhang/ECT-FaceAlignment | c0129dc2aa20bc2bdba03a9ed1cabebcd5e5d848 | e94b446db73fca5ba751d6d9a81d42633208f228 | refs/heads/master | 2023-01-29T14:25:19.502350 | 2020-12-13T09:18:55 | 2020-12-13T09:18:55 | 111,511,579 | 31 | 19 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from .base import CLM
from .fitter import GradientDescentCLMFitter
from .algorithm import ActiveShapeModel, RegularisedLandmarkMeanShift
from .expert import (CorrelationFilterExpertEnsemble, FcnFilterExpertEnsemble,
IncrementalCorrelationFilterThinWrapper)
| [
"hongwen.zhang@cripac.ia.ac.cn"
] | hongwen.zhang@cripac.ia.ac.cn |
60d9e0d451482ce4ec684636a7e97aae7388e9ee | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/bgp/learnedroute.py | dd34ddef0142c7ceacb986d045cee05335610b46 | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,841 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedRoute(Base):
    """The LearnedRoute class encapsulates a system managed learnedRoute node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the LearnedRoute property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.

    NOTE(review): this appears to be generated SDK code -- every property is a
    thin read-only wrapper around self._get_attribute() and should stay in sync
    with the server-side 'learnedRoute' schema; avoid hand-editing the wrappers.
    """
    # Server-side data-model node name consumed by the Base helpers.
    _SDM_NAME = 'learnedRoute'
    def __init__(self, parent):
        super(LearnedRoute, self).__init__(parent)
    @property
    def AsPath(self):
        """Indicates the local IP address of the BGP router.

        NOTE(review): description looks copy-pasted from another attribute
        (an AS path is not an IP address) -- confirm against the data model.

        Returns:
            str
        """
        return self._get_attribute('asPath')
    @property
    def BlockOffset(self):
        """The Label Block Offset (VBO) is the value used to help define this specific label block uniquely -- as a subset of all of the possible labels.

        Returns:
            number
        """
        return self._get_attribute('blockOffset')
    @property
    def BlockSize(self):
        """The size of the label block, in bytes.

        Returns:
            number
        """
        return self._get_attribute('blockSize')
    @property
    def ControlWordEnabled(self):
        """Indicates if the label uses a control word.

        Returns:
            bool
        """
        return self._get_attribute('controlWordEnabled')
    @property
    def IpPrefix(self):
        """The route IP address prefix.

        Returns:
            str
        """
        return self._get_attribute('ipPrefix')
    @property
    def LabelBase(self):
        """The first label in the learned information.

        Returns:
            number
        """
        return self._get_attribute('labelBase')
    @property
    def LocalPreference(self):
        """Indicates the value of the local preference attribute.

        Returns:
            number
        """
        return self._get_attribute('localPreference')
    @property
    def MaxLabel(self):
        """The last label to use.

        Returns:
            number
        """
        return self._get_attribute('maxLabel')
    @property
    def MultiExitDiscriminator(self):
        """A metric field of the route file.

        Returns:
            number
        """
        return self._get_attribute('multiExitDiscriminator')
    @property
    def Neighbor(self):
        """The local IP address for this Ixia-emulated BGP neighbor/peer.

        Returns:
            str
        """
        return self._get_attribute('neighbor')
    @property
    def NextHop(self):
        """The next hop on the path to the destination network in the learned route.

        Returns:
            str
        """
        return self._get_attribute('nextHop')
    @property
    def OriginType(self):
        """An indication of where the route entry originated.

        Returns:
            str
        """
        return self._get_attribute('originType')
    @property
    def PrefixLength(self):
        """The prefix length of the route.

        Returns:
            number
        """
        return self._get_attribute('prefixLength')
    @property
    def RouteDistinguisher(self):
        """The route distinguisher for the route, for use with IPv4 and IPv6 MPLS VPN address types.

        Returns:
            str
        """
        return self._get_attribute('routeDistinguisher')
    @property
    def SeqDeliveryEnabled(self):
        """Indicates if sequential delivery is enabled.

        Returns:
            bool
        """
        return self._get_attribute('seqDeliveryEnabled')
    @property
    def SiteId(self):
        """The site ID.

        Returns:
            number
        """
        return self._get_attribute('siteId')
    def find(self, AsPath=None, BlockOffset=None, BlockSize=None, ControlWordEnabled=None, IpPrefix=None, LabelBase=None, LocalPreference=None, MaxLabel=None, MultiExitDiscriminator=None, Neighbor=None, NextHop=None, OriginType=None, PrefixLength=None, RouteDistinguisher=None, SeqDeliveryEnabled=None, SiteId=None):
        """Finds and retrieves learnedRoute data from the server.

        All named parameters support regex and can be used to selectively retrieve learnedRoute data from the server.
        By default the find method takes no parameters and will retrieve all learnedRoute data from the server.

        Args:
            AsPath (str): Indicates the local IP address of the BGP router.
            BlockOffset (number): The Label Block Offset (VBO) is the value used to help define this specific label block uniquely -- as a subset of all of the possible labels.
            BlockSize (number): The size of the label block, in bytes.
            ControlWordEnabled (bool): Indicates if the label uses a control word.
            IpPrefix (str): The route IP address prefix.
            LabelBase (number): The first label in the learned information.
            LocalPreference (number): Indicates the value of the local preference attribute.
            MaxLabel (number): The last label to use.
            MultiExitDiscriminator (number): A metric field of the route file.
            Neighbor (str): The local IP address for this Ixia-emulated BGP neighbor/peer.
            NextHop (str): The next hop on the path to the destination network in the learned route.
            OriginType (str): An indication of where the route entry originated.
            PrefixLength (number): The prefix length of the route.
            RouteDistinguisher (str): The route distinguisher for the route, for use with IPv4 and IPv6 MPLS VPN address types.
            SeqDeliveryEnabled (bool): Indicates if sequential delivery is enabled.
            SiteId (number): The site ID.

        Returns:
            self: This instance with matching learnedRoute data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # Parameter names must match the SDM attribute names exactly:
        # _select() builds the server-side filter from this locals() dict.
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of learnedRoute data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the learnedRoute data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
10c49e36a3733040248f87d8388adf44086acf6d | 1e508a8d3a491acfb7a58d7917bad6a1cd08aab1 | /ocdskingfisher/cli/commands/run.py | 2780206131c9aa7b6f200f2225379beaf5bbc8d9 | [
"BSD-3-Clause"
] | permissive | spendnetwork/kingfisher-scrape | fcbbd8fa5278bbc40b5517b70632de73b6831634 | 053910b278eed9b3e79ca2f05a04086300b8bc21 | refs/heads/master | 2020-05-20T17:13:35.846004 | 2019-11-19T06:51:19 | 2019-11-19T06:51:19 | 185,683,174 | 0 | 0 | BSD-3-Clause | 2019-05-08T21:42:05 | 2019-05-08T21:42:05 | null | UTF-8 | Python | false | false | 3,855 | py | import ocdskingfisher.cli.commands.base
import ocdskingfisher.sources_util
class RunCLICommand(ocdskingfisher.cli.commands.base.CLICommand):
    """CLI command that gathers and fetches data for one or more sources."""
    command = 'run'

    def __init__(self, config=None):
        # Discover every available source up front so that argument
        # registration and execution share the same mapping.
        self.sources = ocdskingfisher.sources_util.gather_sources()
        self.config = config

    def configure_subparser(self, subparser):
        """Register the command's own flags plus every flag declared by a source."""
        subparser.add_argument("source", help="run one or more sources", nargs="*")
        subparser.add_argument("--all", help="run all sources", action="store_true")
        subparser.add_argument("--sample", help="Run sample only", action="store_true")
        subparser.add_argument("--dataversion", help="Specify a data version to resume")
        subparser.add_argument("--newversion",
                               help="Forces the creation of a new data version (If you don't specify this or " +
                               "--dataversion, the latest version will be used. If there are no versions, a new one will be created.)",
                               action="store_true")
        subparser.add_argument("--note", help="Specify a note to save")
        # Each source class may contribute extra command-line options of its own.
        for src_cls in self.sources.values():
            for arg_def in src_cls.argument_definitions:
                subparser.add_argument('--' + arg_def['name'], help=arg_def['help'])

    def run_command(self, args):
        """Resolve the requested sources, then gather and fetch each one."""
        # --all and explicit source names are mutually exclusive.
        if args.all and args.source:
            print("You need to either specify a source or use --all flag, not both.")
            quit(-1)

        if args.all:
            to_run = [{'id': sid, 'source_class': cls}
                      for sid, cls in self.sources.items()]
        else:
            to_run = []
            for requested in args.source:
                if requested not in self.sources:
                    print("We can not find a source that you requested! You requested: %s" % requested)
                    quit(-1)
                to_run.append({'id': requested, 'source_class': self.sources[requested]})

        # Nothing selected at all: show what is available and bail out.
        if not to_run:
            print("You have not specified anything to run! Try listing your sources names or flag --all")
            print("You can run:")
            for sid, _src in sorted(self.sources.items()):
                print(" - %s" % sid)
            quit(-1)

        sample_mode = args.sample
        if args.verbose:
            print("We will run: ")
            for entry in to_run:
                print(" - %s" % entry['id'])
            print("Sample mode is on!" if sample_mode else "Sample mode is off.")

        for entry in to_run:
            instance = entry['source_class'](self.config.data_dir,
                                             remove_dir=False,
                                             sample=sample_mode,
                                             data_version=args.dataversion,
                                             new_version=args.newversion,
                                             config=self.config,
                                             note=args.note,
                                             )
            instance.set_arguments(args)
            if args.verbose:
                print("Now running: %s (Output Dir: %s, Data Version: %s)" % (entry['id'], instance.output_directory, instance.data_version))
                print(" - gathering ...")
            instance.run_gather()
            if args.verbose:
                print(" - fetching ...")
            instance.run_fetch()
| [
"james.baster@opendataservices.coop"
] | james.baster@opendataservices.coop |
a881e9229b13b365c7d06bed9b0336d131436f71 | 8b1aa2e61f1b41b8f75a0e94aa5f53fc801dc357 | /Python/maximum-distance-in-arrays.py | 4a24c82f43ddecd8279b927fc09b57c4b8d9a723 | [
"MIT"
] | permissive | aditya-AI/LeetCode | 539cae41f8b494ed39bec0a10c561f17c40dad38 | 0fe4a3c3a1d31230c9b5c931ff1e33584f1ccd4e | refs/heads/master | 2021-01-02T08:24:36.970321 | 2017-07-31T13:56:23 | 2017-07-31T13:56:23 | 99,005,326 | 5 | 1 | null | 2017-08-01T13:38:32 | 2017-08-01T13:38:31 | null | UTF-8 | Python | false | false | 1,328 | py | # Time: O(n)
# Space: O(1)
# Given m arrays, and each array is sorted in ascending order.
# Now you can pick up two integers from two different arrays (each array picks one)
# and calculate the distance.
# We define the distance between two integers a and b to be their absolute difference |a-b|.
# Your task is to find the maximum distance.
#
# Example 1:
# Input:
# [[1,2,3],
# [4,5],
# [1,2,3]]
# Output: 4
# Explanation:
# One way to reach the maximum distance 4 is to pick 1 in the first or third array
# and pick 5 in the second array.
# Note:
# Each given array will have at least 1 number. There will be at least two non-empty arrays.
# The total number of the integers in all the m arrays will be in the range of [2, 10000].
# The integers in the m arrays will be in the range of [-10000, 10000].
class Solution(object):
    def maxDistance(self, arrays):
        """Return the maximum |a - b| with a and b taken from two DIFFERENT
        sorted arrays.

        Each array is sorted ascending, so its extremes are arrays[i][0]
        (min) and arrays[i][-1] (max). We sweep the arrays once, keeping the
        running min/max over the arrays seen so far; pairing the current
        array only with *earlier* arrays guarantees the two picked values
        never come from the same array.

        :type arrays: List[List[int]]
        :rtype: int

        Time: O(n) in the number of arrays; Space: O(1).
        """
        result = 0
        # Seed the running extremes with the first array; it is only ever
        # paired with later arrays, never with itself.
        min_val, max_val = arrays[0][0], arrays[0][-1]
        # Bug fix: `xrange` is Python 2 only (NameError on Python 3);
        # iterating the slice directly is equivalent and version-agnostic.
        for arr in arrays[1:]:
            result = max(result,
                         max_val - arr[0],   # prior max vs. this array's min
                         arr[-1] - min_val)  # this array's max vs. prior min
            min_val = min(min_val, arr[0])
            max_val = max(max_val, arr[-1])
        return result
"kamyu104@gmail.com"
] | kamyu104@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.