blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
288
content_id
stringlengths
40
40
detected_licenses
listlengths
0
112
license_type
stringclasses
2 values
repo_name
stringlengths
5
115
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
684 values
visit_date
timestamp[us]date
2015-08-06 10:31:46
2023-09-06 10:44:38
revision_date
timestamp[us]date
1970-01-01 02:38:32
2037-05-03 13:00:00
committer_date
timestamp[us]date
1970-01-01 02:38:32
2023-09-06 01:08:06
github_id
int64
4.92k
681M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
22 values
gha_event_created_at
timestamp[us]date
2012-06-04 01:52:49
2023-09-14 21:59:50
gha_created_at
timestamp[us]date
2008-05-22 07:58:19
2023-08-21 12:35:19
gha_language
stringclasses
147 values
src_encoding
stringclasses
25 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
128
12.7k
extension
stringclasses
142 values
content
stringlengths
128
8.19k
authors
listlengths
1
1
author_id
stringlengths
1
132
12a50275d49eb47f247e28b62b8b6aec74918723
ba3231b25c60b73ca504cd788efa40d92cf9c037
/nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/appqoe/appqoepolicy_lbvserver_binding.py
8ee87e1e11a7806b4dc6076f7173a54e13258719
[ "Apache-2.0", "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
zhuweigh/vpx13
f6d559ae85341e56472e3592cbc67062dac34b93
b36caa3729d3ca5515fa725f2d91aeaabdb2daa9
refs/heads/master
2020-07-04T22:15:16.595728
2019-09-20T00:19:56
2019-09-20T00:19:56
202,435,307
0
0
null
null
null
null
UTF-8
Python
false
false
5,627
py
#
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util


class appqoepolicy_lbvserver_binding(base_resource):
    """Binding class showing the lbvserver that can be bound to appqoepolicy."""

    def __init__(self):
        # Backing fields for the properties exposed below.
        self._boundto = None
        self._bindpriority = None
        self._activepolicy = None
        self._gotopriorityexpression = None
        self._name = None
        self.___count = None

    @property
    def name(self):
        r""".<br/>Minimum length = 1."""
        try:
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name):
        r""".<br/>Minimum length = 1"""
        try:
            self._name = name
        except Exception as e:
            raise e

    @property
    def boundto(self):
        r"""The name of the entity to which the policy is bound."""
        try:
            return self._boundto
        except Exception as e:
            raise e

    @boundto.setter
    def boundto(self, boundto):
        r"""The name of the entity to which the policy is bound."""
        try:
            self._boundto = boundto
        except Exception as e:
            raise e

    @property
    def bindpriority(self):
        r"""Specifies the binding of the policy. use only in display."""
        try:
            return self._bindpriority
        except Exception as e:
            raise e

    @property
    def gotopriorityexpression(self):
        r"""Expression specifying the priority of the next policy which will get
        evaluated if the current policy rule evaluates to TRUE.
        """
        try:
            return self._gotopriorityexpression
        except Exception as e:
            raise e

    @property
    def activepolicy(self):
        try:
            return self._activepolicy
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        r"""Convert the raw nitro response into objects; returns the object
        array for GET requests.
        """
        try:
            result = service.payload_formatter.string_to_resource(
                appqoepolicy_lbvserver_binding_response, response,
                self.__class__.__name__)
            if result.errorcode != 0:
                # 444 means the session is no longer valid on the appliance.
                if result.errorcode == 444:
                    service.clear_session(self)
                if result.severity:
                    if result.severity == "ERROR":
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.appqoepolicy_lbvserver_binding
        except Exception as e:
            raise e

    def _get_object_name(self):
        r"""Return the value of the object identifier argument."""
        try:
            if self.name is not None:
                return str(self.name)
            return None
        except Exception as e:
            raise e

    @classmethod
    def get(cls, service, name="", option_=""):
        r"""Use this API to fetch appqoepolicy_lbvserver_binding resources."""
        try:
            if not name:
                obj = appqoepolicy_lbvserver_binding()
                response = obj.get_resources(service, option_)
            else:
                obj = appqoepolicy_lbvserver_binding()
                obj.name = name
                response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_):
        r"""Use this API to fetch filtered set of appqoepolicy_lbvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try:
            obj = appqoepolicy_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name):
        r"""Use this API to count appqoepolicy_lbvserver_binding resources configued on NetScaler."""
        try:
            obj = appqoepolicy_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_):
        r"""Use this API to count the filtered set of appqoepolicy_lbvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try:
            obj = appqoepolicy_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e


class appqoepolicy_lbvserver_binding_response(base_response):
    def __init__(self, length=1):
        self.appqoepolicy_lbvserver_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.appqoepolicy_lbvserver_binding = [appqoepolicy_lbvserver_binding() for _ in range(length)]
[ "zhuwei@xsky.com" ]
zhuwei@xsky.com
0777d5132fd348e362404fcaf64c9b94d87dde03
07c0500edd527522036760a8cadd4e62e5eb1dcb
/python/union.py
eddcceb5df8731716bf71ff7d29e74468f32ad9c
[]
no_license
amarsyelane/pythonprograms
261323dc453b42d3ba21ae8496d55bfded130dbd
fffc1a0edfd25577beb84e64059ff3d38bc91c35
refs/heads/master
2020-06-24T23:37:09.630903
2019-07-27T16:47:00
2019-07-27T16:47:00
199,127,095
0
1
null
null
null
null
UTF-8
Python
false
false
165
py
def union_list(list1, list2):
    """Return the union of the two input lists with duplicates removed."""
    merged = set(list1) | set(list2)
    return list(merged)


list1 = [1, 2, 3, 4, 5, 6]
list2 = [7, 1, 8, 2, 9, 3]
print(union_list(list1, list2))
[ "root@localhost.localdomain" ]
root@localhost.localdomain
9db6d4b5e220c435d6b8050e7284d665f189d8ca
b7054c7dc39eeb79aa4aecb77a8de222400b19a7
/flask_project/d116/s9day116/s4.py
f36a629ef052544a1412614a0f0fd9f2f349f15c
[]
no_license
csuxh/python_fullstack
89027133c7f9585931455a6a85a24faf41792379
f78571976b3bef104309e95304892fdb89739d9e
refs/heads/master
2023-05-11T09:36:40.482788
2019-06-12T14:21:26
2019-06-12T14:21:26
145,090,531
0
0
null
null
null
null
UTF-8
Python
false
false
1,285
py
from flask import Flask,url_for

app = Flask(__name__)

# Step 1: define a custom converter class.
from werkzeug.routing import BaseConverter


class RegexConverter(BaseConverter):
    """URL converter that matches a caller-supplied regular expression."""

    def __init__(self, map, regex):
        super(RegexConverter, self).__init__(map)
        # The pattern the routing map uses to match this URL segment.
        self.regex = regex

    def to_python(self, value):
        """On a successful route match, this value is what the view
        function receives as its parameter (converted to int here).
        """
        return int(value)

    def to_url(self, value):
        """When building a URL with url_for(), the parameter passes
        through this method; the return value ends up in the URL.
        """
        val = super(RegexConverter, self).to_url(value)
        return val


# Step 2: register the converter under the name 'reg'.
app.url_map.converters['reg'] = RegexConverter

# Request flow:
# 1. the client sends a request
# 2. flask matches the URL against the registered regex
# 3. to_python() is called with the matched text
# 4. its return value is passed to the view function


# Step 3: use the custom regex converter in a route.
@app.route('/index/<reg("\d+"):nid>')
def index(nid):
    print(nid,type(nid))
    print(url_for('index',nid=987))
    return "index"


if __name__ == '__main__':
    app.run()
[ "csuxh@foxmail.com" ]
csuxh@foxmail.com
026f639e2ed9a7f66dd6b76d9b08614d82558dcf
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_capabilities.py
b55c60ffb16290122194860366c1e164cb963345
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
268
py
from xai.brain.wordbase.nouns._capability import _CAPABILITY


# class header
class _CAPABILITIES(_CAPABILITY, ):
    """Noun entry for "capabilities", specialising the _CAPABILITY word class."""

    def __init__(self,):
        _CAPABILITY.__init__(self)
        self.name = "CAPABILITIES"
        self.specie = 'nouns'
        self.basic = "capability"
        self.jsondata = {}
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
7bb1a27b99e94f1e6998524a589ab4c12d7a6f47
7142c3941481e661075154d714a29d5e283a3074
/AddingItemsToTheSet1.py
afcece78482c26ddc68528e02fb846b611a5ffc8
[]
no_license
nirajan5/Demo
5642a9669fedcca47b0304ac423c0b3e6333b8e2
2451875bf5698cd38af69baa117c14099951bc9f
refs/heads/master
2023-07-27T17:04:03.689673
2021-09-15T11:14:25
2021-09-15T11:14:25
406,732,005
0
0
null
null
null
null
UTF-8
Python
false
false
349
py
# Demonstrate building and mutating a set of month names.
Months = set(["January", "February", "March", "April", "May", "June"])

print("\nprinting the original set ... ")
print(Months)

print("\nAdding other months to the set...")
Months.add("July")
Months.add("August")

print("\nPrinting the modified set...")
print(Months)

print("\nlooping through the set elements ... ")
for month in Months:
    print(month)
[ "jhanirajan5@gmail.com" ]
jhanirajan5@gmail.com
d9265b72e4b21399cff19be1d8ba9b2a8d85c546
94bd78e63de94859eb076e52683f73f6ea91eae3
/726.py
66ae50ab0507c371d78444f0c6784ac873ce6cc8
[]
no_license
MadSkittles/leetcode
70598c1c861a8ff5d2f7c921a311307d55770acc
817bbb73dfe095b9c9358dc459ba6605a2a9a256
refs/heads/master
2021-11-30T04:56:02.432749
2021-11-12T03:28:47
2021-11-12T03:28:47
123,558,601
2
1
null
null
null
null
UTF-8
Python
false
false
2,240
py
class Solution:
    def countOfAtoms(self, formula):
        """Return the atom counts of *formula* as 'Element[count]...'
        with elements sorted alphabetically (count omitted when 1).
        """
        counts = self.f(formula)
        out = ''
        for element in sorted(counts.keys()):
            out += element + (str(counts[element]) if counts[element] > 1 else '')
        return out

    def f(self, formula: str):
        """Recursively parse *formula* into a Counter of element -> multiplicity."""
        from collections import Counter
        if not formula:
            return Counter()
        i = formula.find('(')
        if i >= 0:
            # Locate the ')' matching the first '(' by tracking depth.
            j, depth = i + 1, 1
            while depth > 0:
                depth += {'(': 1, ')': -1}.get(formula[j], 0)
                j += 1
            # Collect the multiplier digits immediately after the ')'.
            k = j + 1
            while k < len(formula) and formula[k].isdigit():
                k += 1
            mult = formula[j:k]
            mult = int(mult) if mult else 1
            inner = self.f(formula[i + 1:j - 1])
            for e in inner:
                inner[e] *= mult
            # Prefix + scaled group + suffix (Counter addition merges them).
            return self.f(formula[:i]) + inner + self.f(formula[k:])
        else:
            # No parentheses: scan element symbols and their optional counts.
            res = Counter()
            element = ''
            index = 0
            while index < len(formula):
                if element and (formula[index].isdigit() or formula[index].isupper()):
                    if formula[index].isupper():
                        # New element starts; the pending one had count 1.
                        res[element] += 1
                        element = formula[index]
                    else:
                        start = index
                        while index < len(formula) and formula[index].isdigit():
                            index += 1
                        res[element] += int(formula[start:index])
                        element = formula[index] if index < len(formula) else ''
                else:
                    element += formula[index]
                index += 1
            if element:
                res[element] += 1
            return res


if __name__ == '__main__':
    solution = Solution()
    print(solution.countOfAtoms(
        "(((U42Se42Fe10Mc31Rh49Pu49Sb49)49V39Tm50Zr44Og6)33((W2Ga48Tm14Eu46Mt12)23(RuRnMn11)7(Yb15Lu34Ra19CuTb2)47(Md38BhCu48Db15Hf12Ir40)7CdNi21(Db40Zr24Tc27SrBk46Es41DsI37Np9Lu16)46(Zn49Ho19RhClF9Tb30SiCuYb16)15)37(Cr48(Ni31)25(La8Ti17Rn6Ce35)36(Sg42Ts32Ca)37Tl6Nb47Rh32NdGa18Cm10Pt49(Ar37RuSb30Cm32Rf28B39Re7F36In19Zn50)46)38(Rh19Md23No22PoTl35Pd35Hg)41)50"))
[ "noreply@github.com" ]
MadSkittles.noreply@github.com
b20577618c5c7056d9ef2c7eb75c7e3783a2ada2
b453635bb1a1b767179250ddf5200dd5982752e3
/apps/home/urls.py
c0bb3f878ae81bbf037a8a83efc38440cbcde5c5
[ "Unlicense" ]
permissive
orhan1616/django-russian-ecommerce
21e5c52f13fea5e7bb2f4fac2e4b9657729b33a5
9e64a0500ae529fc81e6ed2fb335b33d0ae5354a
refs/heads/main
2023-06-17T22:06:39.199411
2021-07-07T11:54:21
2021-07-07T11:54:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
306
py
# apps/home/urls.py

# Django modules
from django.urls import path

# Django locals
from apps.home import views

# Route table for the "home" app: landing, about-us and contact-us pages.
urlpatterns = [
    path('', views.homepage, name='homepage'),
    path('aboutus/', views.aboutuspage, name='aboutuspage'),
    path('contactus/', views.contactuspage, name='contactuspage'),
]
[ "ingafter60@outlook.com" ]
ingafter60@outlook.com
13173968ed6a9c4b62bcd9f9f66a07bacb5f1b35
e1b8fb9a5500516f28d3d7e9a5f259c49ef35f14
/top/api/rest/SkusCustomGetRequest.py
70aeced78d8d92d2a43572c618d79dcdc9b6eee7
[]
no_license
htom78/taobao_comet_py
9224dbca1a413a54bcc5569873e4c7a9fc9ba059
ad8b2e983a14d3ab7665244449f79dd72f390815
refs/heads/master
2020-05-17T10:47:28.369191
2013-08-27T08:50:59
2013-08-27T08:50:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
329
py
'''
Created by auto_sdk on 2013-06-16 16:36:02
'''
from top.api.base import RestApi


class SkusCustomGetRequest(RestApi):
    """Request object for the taobao.skus.custom.get API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters (populated by the caller before sending).
        self.fields = None
        self.outer_id = None

    def getapiname(self):
        return 'taobao.skus.custom.get'
[ "tomhu@ekupeng.com" ]
tomhu@ekupeng.com
9953ddae4f7a966b2c28335395b9fe7a270804e1
653a3d9d66f3d359083cb588fc7c9ece8bb48417
/test/runtime/frontend_test/onnx_test/defs_test/math_test/sqrt_test.py
9374a12650ad590ad76c5896649b7ba2359b530a
[ "Zlib", "MIT" ]
permissive
leonskim/webdnn
fec510254b15f3dec00f5bed8f498737b372e470
f97c798c9a659fe953f9dc8c8537b8917e4be7a2
refs/heads/master
2020-04-15T18:42:43.632244
2019-01-10T10:07:18
2019-01-10T10:07:18
164,921,764
0
0
NOASSERTION
2019-01-09T19:07:35
2019-01-09T19:07:30
Python
UTF-8
Python
false
false
820
py
import numpy as np

from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model
from test.util import wrap_template, generate_kernel_test_case
from webdnn.frontend.onnx import ONNXConverter


@wrap_template
def template(x_shape, description: str = ""):
    """Build an ONNX Sqrt model for *x_shape* and register a kernel test case.

    Inputs are shifted by +1.0 so every element is >= 1 and sqrt is well defined.
    """
    vx = np.random.rand(*x_shape) + 1.0
    vy = np.sqrt(vx)

    x = make_tensor_value_info("x", vx.shape)
    y = make_tensor_value_info("y", vy.shape)
    operator = make_node("Sqrt", ["x"], ["y"])
    model = make_model([operator], [x], [y])

    graph = ONNXConverter().convert(model)

    generate_kernel_test_case(
        description=f"[ONNX] Sqrt {description}",
        graph=graph,
        inputs={graph.inputs[0]: vx},
        expected={graph.outputs[0]: vy}
    )


def test():
    template(x_shape=[2, 3, 4, 5])
[ "y.kikura@gmail.com" ]
y.kikura@gmail.com
b9d91097495c60f324c7dcc2ec855d4b5c1f5550
9acfe8ea905a7613b232cf9e512311289d4e5e27
/CodeForce/Round4_2/task3.py
8513d85beef58c02028922218a82b9bb0ca9bdec
[]
no_license
antofik/Python
e790ecb61babb23fad198ba996f24b31fdff9f39
bb6ab6cd87d7bfb1d6efca6623b4b00c387313a8
refs/heads/master
2020-12-24T14:27:39.341992
2014-03-09T07:32:07
2014-03-09T07:32:07
17,551,107
1
0
null
null
null
null
UTF-8
Python
false
false
3,426
py
# coding=utf-8 """ C. Система регистрации ограничение по времени на тест:5 seconds ограничение по памяти на тест:64 megabytes ввод:standard input вывод:standard output В скором времени в Берляндии откроется новая почтовая служба "Берляндеск". Администрация сайта хочет запустить свой проект как можно быстрее, поэтому они попросили Вас о помощи. Вам предлагается реализовать прототип системы регистрации сайта. Система должна работать по следующему принципу. Каждый раз, когда новый пользователь хочет зарегистрироваться, он посылает системе запрос name со своим именем. Если данное имя не содержится в базе данных системы, то оно заносится туда и пользователю возвращается ответ OK, подтверждающий успешную регистрацию. Если же на сайте уже присутствует пользователь с именем name, то система формирует новое имя и выдает его пользователю в качестве подсказки, при этом подсказка также добавляется в базу данных. Новое имя формируется по следующему правилу. К name последовательно приписываются числа, начиная с единицы (name1, name2, ...), и среди них находят такое наименьшее i, что namei не содержится в базе данных сайта. Входные данные В первой строке входных данных задано число n (1 ≤ n ≤ 105). Следующие n строк содержат запросы к системе. Каждый запрос представляет собой непустую строку длиной не более 32 символов, состоящую только из строчных букв латинского алфавита. Выходные данные В выходных данных должно содержаться n строк — ответы системы на запросы: ОК в случае успешной регистрации, или подсказку с новым именем, если запрашиваемое уже занято. """ from collections import defaultdict from sys import stdin, exit def task(): stdin.readline() names = map(str.strip, stdin.readlines()) response = [] d = defaultdict(int) for name in names: i = d[name] if i == 0: response.append("OK") d[name] += 1 else: while True: kandidate = '%s%s' % (name, i) if d[kandidate] == 0: d[kandidate] += 1 d[name] = i response.append(kandidate) break i += 1 for r in response: print r task()
[ "antofik@gmail.com" ]
antofik@gmail.com
28968f4cf0904ba68959fcbeb4aeeacea48d661f
c20534744b07252421aef4e157101eeb2f8a7090
/django/project4/books/models.py
d9420ddcc23642a44a7fe1fb1bfa357abbd5ae0d
[]
no_license
lee-seul/development_practice
eb34a9be21ba2b8f20646420e903f07343f55f95
b56fcdded15bf437e365c7d8ffe6981127adb5d4
refs/heads/master
2020-12-25T23:10:16.112757
2018-04-02T07:36:14
2018-04-02T07:36:14
44,519,248
2
0
null
null
null
null
UTF-8
Python
false
false
703
py
from django.db import models


class Author(models.Model):
    # NOTE(review): "saluation" looks like a typo for "salutation", but the
    # column name is part of the schema, so it is kept as-is.
    saluation = models.CharField(max_length=100)
    name = models.CharField(max_length=50)
    email = models.EmailField()

    def __unicode__(self):
        return self.name


class Publisher(models.Model):
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=100)
    website = models.URLField()

    def __unicode__(self):
        return self.name


class Book(models.Model):
    title = models.CharField(max_length=100)
    authors = models.ManyToManyField('Author')
    publisher = models.ForeignKey(Publisher)
    publication_date = models.DateField()

    def __unicode__(self):
        return self.title
[ "blacksangi14@naver.com" ]
blacksangi14@naver.com
b377681d860d1c9cfb40f5ae30246ba53a8f0ef7
2225034e851e936b813005ce3948d76a2c0f85aa
/accounts/migrations/0003_alter_user_user_type.py
065708962d7354caf2b9947eac6e7ae9b7753fb4
[]
no_license
khaled-hamada/doctor-online-app-task
83a6f6864467552f4b08f0832a441ddcaaa8a3e7
a6d5c8284cc21f21bb5227281ff27b3c3a1ad8d6
refs/heads/main
2023-07-12T06:52:11.425656
2021-08-24T13:17:18
2021-08-24T13:17:18
399,429,996
0
0
null
null
null
null
UTF-8
Python
false
false
413
py
# Generated by Django 3.2.6 on 2021-08-23 11:00

from django.db import migrations, models


class Migration(migrations.Migration):
    """Relax ``user.user_type`` to a nullable, blankable CharField(64)."""

    dependencies = [
        ('accounts', '0002_alter_user_user_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='user_type',
            field=models.CharField(blank=True, max_length=64, null=True),
        ),
    ]
[ "khaledosman737@gmail.com" ]
khaledosman737@gmail.com
27f44b74cc5fedd3f218a356d0844f7d2ffac7e5
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/96/usersdata/184/53936/submittedfiles/estatistica.py
54102a6b3c85cedfd1bbea3a41e1c235f0059378
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
890
py
# -*- coding: utf-8 -*- def media(lista): soma = 0 for i in range(0,len(lista),1): soma = soma + lista[i] resultado = soma/len(lista) return resultado def variância(lista): media=media(lista) soma=0 variância=0 for i in range(0,len(lista),1): soma=((lista[i]-media)**2) variancia=soma/float(len(lista)) return variancia def desviopadrao(lista): return math.sqrt(variancia(lista)) a=[] b=[] n=input('digite a quantidade de elementos:') for i in range(0,n,1): a.append(input('digite um elemnto:')) for i in range(0,n,1): b.append(input('digite um elemnto:')) media_a=media(a) media_b=media(b) desviopadrao_a=desviopadrao(a) desviopadrao_b=desviopadrao(b) print media_a print desviopadrao_a print media_b print desviopadrao_b #Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
a6df2cb4a8a4471534fc5fe9ba7d6996c2dff035
cc44edfa1edbedea3ad044805be7548e0ccba70d
/0x0F-python-object_relational_mapping/13-model_state_delete_a.py
c044b9f72162be0367f0edae4457370fc78e42b1
[]
no_license
set808/holbertonschool-higher_level_programming
421f0da1f91cd56eb2daa4e07a51b4a505d53edc
eb276a4e68e5cc43498459eec78fc05f72e2cd48
refs/heads/master
2020-03-09T13:07:43.824914
2018-09-08T00:26:46
2018-09-08T00:26:46
128,802,718
0
0
null
null
null
null
UTF-8
Python
false
false
663
py
#!/usr/bin/python3
"""Delete State objects whose name contains the letter a."""
from sys import argv
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy import update
from sqlalchemy.orm import sessionmaker


def main(argv):
    """Connect with the MySQL credentials in argv[1:4], then delete every
    State whose name contains 'a' and commit the change.
    """
    engine = create_engine(
        'mysql+mysqldb://{}:{}@localhost:3306/{}'.format(
            argv[1], argv[2], argv[3]), pool_pre_ping=True)
    Base.metadata.create_all(engine)

    Session = sessionmaker(bind=engine)
    session = Session()

    for state in session.query(State).filter(State.name.like('%a%')):
        session.delete(state)
    session.commit()


if __name__ == "__main__":
    main(argv)
[ "spencertaylor808@gmail.com" ]
spencertaylor808@gmail.com
ed42e215d09f16cabbe9caef947294944b5956c2
c80ec1805a7e6cb1bd3f4b3e383ef4f4cf164765
/gen/datehandler/__init__.py
b3a56bafa582242949c6717acefc05dda78990b6
[]
no_license
balrok/gramps_addon
57c8e976c47ea3c1d1298d3fd4406c13909ac933
0c79561bed7ff42c88714edbc85197fa9235e188
refs/heads/master
2020-04-16T03:58:27.818732
2015-02-01T14:17:44
2015-02-01T14:17:44
30,111,898
2
1
null
null
null
null
UTF-8
Python
false
false
3,166
py
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2004-2007 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Class handling language-specific selection for date parser and displayer. """ from __future__ import print_function, unicode_literals #------------------------------------------------------------------------- # # set up logging # #------------------------------------------------------------------------- import logging from ..const import GRAMPS_LOCALE as glocale _ = glocale.translation.sgettext # import prerequisites for localized handlers from ._datehandler import (LANG, LANG_SHORT, LANG_TO_PARSER, LANG_TO_DISPLAY, register_datehandler) from . import _datestrings # Import all the localized handlers from . import _date_ar from . import _date_bg from . import _date_ca from . import _date_cs from . import _date_da from . import _date_de from . import _date_el from . import _date_es from . import _date_fi from . import _date_fr from . import _date_hr from . import _date_it from . import _date_lt from . import _date_nb from . import _date_nl from . import _date_pl from . import _date_pt from . import _date_ru from . import _date_sk from . import _date_sl from . import _date_sr from . import _date_sv from . 
import _date_uk # Initialize global parser try: if LANG in LANG_TO_PARSER: parser = LANG_TO_PARSER[LANG]() else: parser = LANG_TO_PARSER[LANG_SHORT]() except: logging.warning(_("Date parser for '%s' not available, using default") % LANG) parser = LANG_TO_PARSER["C"]() # Initialize global displayer try: from ..config import config val = config.get('preferences.date-format') except: val = 0 try: if LANG in LANG_TO_DISPLAY: displayer = LANG_TO_DISPLAY[LANG](val) else: displayer = LANG_TO_DISPLAY[LANG_SHORT](val) except: logging.warning(_("Date displayer for '%s' not available, using default") % LANG) displayer = LANG_TO_DISPLAY["C"](val) # Import utility functions from ._dateutils import * from ._grampslocale import (codeset, tformat) if __name__ == "__main__": from ._datedisplay import DateDisplay m = 0 for l,d in LANG_TO_DISPLAY.items(): if len(l) != 2: continue m = max(m, len(d.formats)) print("{}: {} {} own dg: {}".format( l, len(d.formats), d.formats, d._display_gregorian != DateDisplay._display_gregorian)) print("MAX: ", m)
[ "carl.schoenbach@gmail.com" ]
carl.schoenbach@gmail.com
228724cb4ba363074d939a34c441ba86ebe9c6c2
25cb0013b8e635dd5a7cc189819d43191f571fe2
/tests/parser/test_wikipedia.py
09d7a2f3bcca27a44bfcb1147cb6ef82d0cd2e7c
[ "MIT" ]
permissive
openmicroanalysis/pyxray
e40a1c575b8f5002f162c611c4ffcf00ff028907
b06478f6c251d92e878713d18d5c7bc063bac0fb
refs/heads/master
2023-03-18T11:41:28.806694
2023-03-10T09:35:58
2023-03-10T09:35:58
57,978,665
4
2
MIT
2023-03-10T09:35:59
2016-05-03T15:10:14
Python
UTF-8
Python
false
false
299
py
#!/usr/bin/env python
""" """

# Standard library modules.

# Third party modules.

# Local modules.
from pyxray.parser.wikipedia import WikipediaElementNameParser

# Globals and constants variables.


def test_wikipedia():
    """The Wikipedia element-name parser should yield at least one entry."""
    parser = WikipediaElementNameParser()
    assert len(list(parser)) > 0
[ "philippe.pinard@gmail.com" ]
philippe.pinard@gmail.com
3f786d9369922895b5661bc3d6051ba8a9589b3a
3a891a79be468621aae43defd9a5516f9763f36e
/desktop/core/ext-py/nose-1.3.7/examples/plugin/setup.py
4dd5dad316fc51c1078d9f21e31d231609ef1249
[ "Apache-2.0", "LGPL-2.1-only" ]
permissive
oyorooms/hue
b53eb87f805063a90f957fd2e1733f21406269aa
4082346ef8d5e6a8365b05752be41186840dc868
refs/heads/master
2020-04-15T20:31:56.931218
2019-01-09T19:02:21
2019-01-09T19:05:36
164,998,117
4
2
Apache-2.0
2019-01-10T05:47:36
2019-01-10T05:47:36
null
UTF-8
Python
false
false
523
py
""" An example of how to create a simple nose plugin. """ try: import ez_setup ez_setup.use_setuptools() except ImportError: pass from setuptools import setup setup( name='Example plugin', version='0.1', author='Jason Pellerin', author_email = 'jpellerin+nose@gmail.com', description = 'Example nose plugin', license = 'GNU LGPL', py_modules = ['plug'], entry_points = { 'nose.plugins.0.10': [ 'example = plug:ExamplePlugin' ] } )
[ "ranade@cloudera.com" ]
ranade@cloudera.com
85100f9a205b487eaaf3372ea12fccd23da2983d
9316e155538af98001c6d7551e721b6160c99bd7
/run_ants.py
716f1e81bac243ddd8c7e9ef3e5e5a461ccb5298
[ "Apache-2.0" ]
permissive
binarybottle/mindboggle_sidelined
2c7c6591d199a5b715cb028d3374c1a426fb4341
1431d4877f4ceae384486fb66798bc22e6471af7
refs/heads/master
2016-09-06T17:24:32.646682
2015-02-16T04:45:44
2015-02-16T04:45:44
30,854,143
3
0
null
null
null
null
UTF-8
Python
false
false
1,650
py
""" Run antsCorticalThickness.sh on Mindboggle-101 brains """ import os run_all = True if run_all: names = ['OASIS-TRT-20', 'MMRR-21', 'NKI-RS-22', 'NKI-TRT-20', 'Afterthought', 'Colin27', 'Twins-2', 'MMRR-3T7T-2', 'HLN-12'] numbers = [20,21,22,20,1,1,2,2,12] i1 = 0 names = [names[i1]] numbers = [numbers[i1]] path1 = '/homedir/Data/Brains/Mindboggle101/subjects/' end1a = '/mri/orig/001.mgz' end1b = '/mri/orig/001.nii.gz' path2 = '/data/Brains/Atropos_templates/OASIS-30_Atropos_template/' end2a = 'T_template0.nii.gz' end2b = 'T_template0_BrainCerebellum.nii.gz' end2c = 'T_template0_BrainCerebellumProbabilityMask.nii.gz' end2d = 'T_template0_BrainCerebellumExtractionMask.nii.gz' end2e = 'Priors2/priors%d.nii.gz' convert = False for i,name in enumerate(names): number = numbers[i] for n in range(1,number+1): if convert: s = 'mri_convert {0}{1}-{2}{3} {0}{1}-{2}{4} ' \ .format(path1, name, n, end1a, end1b) print(s) os.system(s) prefix = 'antsCorticalThickness/{0}-{1}/ants'.format(name, n) s = 'antsCorticalThickness.sh -d 3 -n 3 -w 0.25 ' \ '-a {0}{1}-{2}{3} ' \ '-o {4} ' \ '-e {5}/{6} ' \ '-t {5}/{7} ' \ '-m {5}/{8} ' \ '-f {5}/{9} ' \ '-p {5}/{10} ' \ .format(path1, name, n, end1b, prefix, path2, end2a, end2b, end2c, end2d, end2e) print(s) os.system(s)
[ "arno@binarybottle.com" ]
arno@binarybottle.com
bdbbde89fa8e66deaf81f53dee472784536d88c9
61747f324eaa757f3365fd7bf5ddd53ea0db47d1
/casepro/cases/migrations/0017_outgoing_text.py
f5a2e63d2c2bc3469b928f6e54c6172ceabe1ef8
[ "BSD-3-Clause" ]
permissive
BlueRidgeLabs/casepro
f8b0eefa8f961dd2fdb5da26a48b619ebc1f8c12
8ef509326f3dfa80bb44beae00b60cc6c4ac7a24
refs/heads/master
2022-01-24T09:01:18.881548
2017-12-05T18:46:05
2017-12-05T18:49:42
113,502,588
0
0
null
2017-12-07T21:57:37
2017-12-07T21:57:37
null
UTF-8
Python
false
false
404
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Add the nullable ``text`` field (max 640 chars) to Outgoing."""

    dependencies = [
        ('cases', '0016_case_contact'),
    ]

    operations = [
        migrations.AddField(
            model_name='outgoing',
            name='text',
            field=models.TextField(max_length=640, null=True),
        ),
    ]
[ "rowanseymour@gmail.com" ]
rowanseymour@gmail.com
a1c71cac28e8106d69520a88204e6eebf415bd96
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/adjectives/_omniscient.py
d973e99c473ac2a6a285eb5b5832fff3a0311952
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
413
py
#calss header class _OMNISCIENT(): def __init__(self,): self.name = "OMNISCIENT" self.definitions = [u'having or seeming to have unlimited knowledge: '] self.parents = [] self.childen = [] self.properties = [] self.jsondata = {} self.specie = 'adjectives' def run(self, obj1, obj2): self.jsondata[obj2] = {} self.jsondata[obj2]['properties'] = self.name.lower() return self.jsondata
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
033ff5a1c621db3638e1544b4a2d3c6f3741a08d
0130c8b14927097663157846adc4b146d67d2fda
/tests/st/ops/gpu/test_fused_relu_grad_bn_reduce_grad.py
4e5f6afbc529a5aefef225ba6b5e26b43a8557f6
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "BSD-3-Clause", "NCSA", "LLVM-exception", "Zlib", "BSD-2-Clause", "MIT" ]
permissive
Shigangli/akg
e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc
3766c54e0b109541932d147a6b5643a334b82403
refs/heads/master
2023-09-06T05:13:40.571583
2021-11-23T03:44:54
2021-11-23T03:44:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,586
py
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import numpy as np

from akg.utils import kernel_exec as utils
from tests.common.gen_random import random_gaussian
from tests.common.test_op.resnet.fused_relu_grad_bn_reduce_grad import fused_relu_grad_bn_reduce_grad


def gen_data(shape, dtype):
    """Random Gaussian data (miu=1, sigma=0.1) cast to *dtype*."""
    return random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)


def compute_py(data_1, data_2, data_3, data_4, data_5, data_6,
               data_7, data_8, data_9, layout):
    """NumPy reference for fused ReLU-grad + BN-reduce-grad.

    Returns (expect, output): the float16 expected result and an
    NaN-filled buffer of the same shape for the kernel to write into.
    """
    data_tmp1 = np.multiply(data_4, data_5)
    n, h, w, c = np.shape(data_9)
    data_tmp2 = np.full(np.shape(data_tmp1), 1.0 / (n * h * w), 'float32')
    data_tmp3 = np.multiply(data_tmp1, data_tmp2)

    # ReLU gradient: pass data_8 through only where data_9 > 0.
    data_tmp5 = np.full(np.shape(data_9), 0.0, 'float16')
    data_tmp6 = np.greater(data_9, data_tmp5)
    data_tmp7 = np.where(data_tmp6, data_8, data_tmp5)
    data_tmp8 = data_tmp7.astype('float32')

    data_tmp9 = np.full(np.shape(data_9), n * h * w, 'float32')
    data_tmp10 = np.multiply(data_tmp8, data_tmp9)
    data_tmp12 = np.subtract(data_tmp10, data_3)
    data_tmp14 = data_7.astype('float32')
    data_tmp15 = np.multiply(data_6, data_tmp2)
    data_tmp17 = np.subtract(data_tmp14, data_tmp15)
    data_tmp18 = np.multiply(data_2, data_tmp17)
    data_tmp20 = np.divide(data_tmp18, data_1)
    data_tmp21 = np.subtract(data_tmp12, data_tmp20)
    data_tmp22 = np.multiply(data_tmp3, data_tmp21)

    expect = data_tmp22.astype('float16')
    output = np.full(np.shape(expect), np.nan, 'float16')
    return expect, output


def test_fused_relu_grad_bn_reduce_grad(shape_1, shape_2, layout='NHWC', poly_sch=False):
    """Build, launch and validate the fused kernel against compute_py."""
    data_1 = gen_data(shape_1, 'float32')
    data_2 = gen_data(shape_1, 'float32')
    data_3 = gen_data(shape_1, 'float32')
    data_4 = gen_data(shape_1, 'float32')
    data_5 = gen_data(shape_1, 'float32')
    data_6 = gen_data(shape_1, 'float32')
    data_7 = gen_data(shape_2, 'float16')
    data_8 = gen_data(shape_2, 'float16')
    data_9 = gen_data(shape_2, 'float16')

    expect, output = compute_py(data_1, data_2, data_3, data_4, data_5,
                                data_6, data_7, data_8, data_9, layout)
    input_list = [shape_1, shape_1, shape_1, shape_1, shape_1, shape_1,
                  shape_2, shape_2, shape_2]
    dtype_list = ['float32', 'float32', 'float32', 'float32', 'float32',
                  'float32', 'float16', 'float16', 'float16']
    op_attrs = [layout]
    if poly_sch:
        mod = utils.op_build_test(fused_relu_grad_bn_reduce_grad, input_list, dtype_list,
                                  kernel_name="fused_relu_grad_bn_reduce_grad",
                                  op_attrs=op_attrs, attrs={"target": "cuda"})

    args = [data_1, data_2, data_3, data_4, data_5, data_6,
            data_7, data_8, data_9, output]
    output = utils.mod_launch(mod, args, expect=expect)
    res = np.allclose(output, expect, rtol=5e-03, atol=1e-08)
    print("Test {}".format("Pass" if res else "Failed"))
    if not res:
        print("Error cuda:========================")
        print(mod.imported_modules[0].get_source())
        raise AssertionError("Test fail")

    return True
[ "1027252281@qq.com" ]
1027252281@qq.com
a74d54cf192a18fe0dd00295cc02d80e22709ea2
da85a249f6ffc68c71bc27b1246a77704d15770e
/tests/debug-tests/utils.py
4003318523fa789c9a341c578a8c9671aae29397
[ "BSD-3-Clause" ]
permissive
efajardo-nv/ucx-py
d8d268fc094afd28512c843dfa49f6afcb55d4bb
90e7ae0214ea23588040d00f9c9497aba0f09b65
refs/heads/master
2022-05-22T10:32:24.411103
2020-03-31T18:10:46
2020-03-31T18:10:46
257,774,594
0
0
BSD-3-Clause
2020-04-22T02:52:27
2020-04-22T02:52:26
null
UTF-8
Python
false
false
2,379
py
# Debug-test helpers for ucx-py: framed send/recv over a UCX endpoint plus
# the shared CLI argument parser.
import argparse

from distributed.comm.utils import from_frames
from distributed.utils import nbytes, parse_bytes
import numpy as np

import ucp

ITERATIONS = 50


def cuda_array(size):
    """Allocate a device buffer of `size` uint8 elements via numba.

    The commented alternatives (cupy / rmm) are kept for easy switching
    between GPU allocators while debugging.
    """
    # import cupy
    # return cupy.empty(size, dtype=cupy.uint8)
    # return rmm.device_array(size, dtype=np.uint8)
    import numba.cuda

    return numba.cuda.device_array((size,), dtype=np.uint8)


async def send(ep, frames):
    """Send `frames` over endpoint `ep` preceded by their metadata.

    Wire order: frame count, per-frame "is CUDA" flags, per-frame byte
    sizes, then each non-empty frame body.
    """
    await ep.send(np.array([len(frames)], dtype=np.uint64))
    await ep.send(
        np.array(
            [hasattr(f, "__cuda_array_interface__") for f in frames], dtype=np.bool
        )
    )
    await ep.send(np.array([nbytes(f) for f in frames], dtype=np.uint64))
    # Send frames
    for frame in frames:
        if nbytes(frame) > 0:
            await ep.send(frame)


async def recv(ep):
    """Receive frames produced by send() and deserialize them.

    Returns (frames, msg) where msg is the object reconstructed by
    distributed's from_frames().
    """
    try:
        # Recv meta data
        nframes = np.empty(1, dtype=np.uint64)
        await ep.recv(nframes)
        is_cudas = np.empty(nframes[0], dtype=np.bool)
        await ep.recv(is_cudas)
        sizes = np.empty(nframes[0], dtype=np.uint64)
        await ep.recv(sizes)
    except (ucp.exceptions.UCXCanceled, ucp.exceptions.UCXCloseError) as e:
        msg = "SOMETHING TERRIBLE HAS HAPPENED IN THE TEST"
        # BUG FIX: the original did `raise e(msg)`, which *calls* the caught
        # exception instance and would itself fail with TypeError.  Re-raise
        # the same exception type with the diagnostic message, chaining the
        # original so the UCX traceback is preserved.
        raise type(e)(msg) from e

    # Recv frames: empty frames are materialized locally, non-empty ones are
    # received into a host or device buffer depending on the is_cuda flag.
    frames = []
    for is_cuda, size in zip(is_cudas.tolist(), sizes.tolist()):
        if size > 0:
            if is_cuda:
                frame = cuda_array(size)
            else:
                frame = np.empty(size, dtype=np.uint8)
            await ep.recv(frame)
            frames.append(frame)
        else:
            if is_cuda:
                frames.append(cuda_array(size))
            else:
                frames.append(b"")

    msg = await from_frames(frames)
    return frames, msg


def parse_args(args=None):
    """Build and parse the command-line options shared by the debug tests."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--server", default=None, help="server address.")
    parser.add_argument("-p", "--port", default=13337, help="server port.", type=int)
    parser.add_argument(
        "-n",
        "--n-bytes",
        default="10 Mb",
        type=parse_bytes,
        help="Message size. Default '10 Mb'.",
    )
    parser.add_argument(
        "--n-iter",
        default=10,
        type=int,
        help="Numer of send / recv iterations (default 10).",
    )
    return parser.parse_args()
[ "quasiben@gmail.com" ]
quasiben@gmail.com
4f47113183c2cf8e5d890bfebbab7cf2f9c5bc3a
2cc44ba2a9b9b752fd1b1ebfd1a3681fe87c8617
/models/datamodel.py
a3054c3669606379b4b05d100a9265744241b4ac
[]
no_license
jorgec/iNav-Python-SDK
47c6a2b5e9a4eb246b9ee997b67a9726e2f9db30
57260980f8e2f5130f1abc21fd6dab7404ea05ac
refs/heads/master
2020-08-29T23:46:45.733005
2019-10-29T04:47:55
2019-10-29T04:47:55
218,206,231
0
0
null
null
null
null
UTF-8
Python
false
false
250
py
import json


class DataModel:
    """Base class for simple serializable models.

    Subclasses declare ``Meta.fields`` as a mapping from output key to the
    name of the instance attribute that supplies its value.
    """

    class Meta:
        # output key -> attribute name; empty by default.
        fields = {}

    def as_dict(self):
        """Return ``{output_key: str(attribute_value)}`` per ``Meta.fields``."""
        result = {}
        for out_key, attr_name in self.Meta.fields.items():
            result[out_key] = str(getattr(self, attr_name))
        return result

    def serialize(self):
        """Return the JSON encoding of :meth:`as_dict`."""
        return json.dumps(self.as_dict())
[ "jorge.cosgayon@gmail.com" ]
jorge.cosgayon@gmail.com
c476de20becad6e2a53cd18d6249c626d55d01c1
e8199f1d424592affe19b50fd96a02815067d1b1
/Apple/329. Longest Increasing Path in a Matrix.py
4b7821d007aef9791d6d78446d5a4277649b4273
[]
no_license
srajsonu/LeetCode-Solutions-Python
39a809e4c6d555a3a3055ce03d59cfa40b93a287
8ec31c8df2885f3da533424ba13060b7d3e3af78
refs/heads/master
2023-03-19T10:05:42.578615
2021-03-13T17:21:36
2021-03-13T17:21:36
280,716,200
0
1
null
2020-10-06T09:54:02
2020-07-18T18:32:04
Python
UTF-8
Python
false
false
1,003
py
from collections import deque


class Solution:
    """LeetCode 329: length of the longest strictly increasing path in a matrix."""

    def isValid(self, A, i, j):
        """Return True iff cell (i, j) lies inside matrix A."""
        return 0 <= i < len(A) and 0 <= j < len(A[0])

    def dfs(self, A, i, j, dp):
        """Longest increasing path starting at (i, j), memoised in dp.

        Walks to strictly smaller 4-neighbours, so the reversed walk is the
        increasing path ending at (i, j).
        """
        if dp[i][j]:
            return dp[i][j]
        best = 0
        for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            nr, nc = i + dr, j + dc
            if self.isValid(A, nr, nc) and A[i][j] > A[nr][nc]:
                best = max(best, self.dfs(A, nr, nc, dp))
        dp[i][j] = best + 1
        return dp[i][j]

    def Solve(self, A):
        """Return the longest increasing path length over all start cells."""
        if not A:
            return 0
        rows, cols = len(A), len(A[0])
        dp = [[0] * cols for _ in range(rows)]
        return max(self.dfs(A, x, y, dp) for x in range(rows) for y in range(cols))


if __name__ == '__main__':
    A = [[9, 9, 4], [6, 6, 8], [2, 1, 1]]
    B = Solution()
    print(B.Solve(A))
[ "srajsonu02@gmail.com" ]
srajsonu02@gmail.com
b0778fa352eb1a191c385864773583a269064c75
2713e8a47e68d82907a4cedc6434ef1cd72d85e7
/fluo/middleware/locale.py
5ced4754e4f221eba613a13a5729fba9f5901d25
[ "MIT" ]
permissive
rsalmaso/django-fluo
a283b8f75769ac6e57fa321c607819899e0c31c8
24b9f36e85b247ea209b9c40b17599e7731f5ded
refs/heads/main
2023-01-12T01:37:06.975318
2022-12-30T22:08:40
2022-12-30T22:08:40
48,948,936
1
0
null
null
null
null
UTF-8
Python
false
false
7,060
py
# Copyright (C) 2007-2022, Raffaele Salmaso <raffaele@salmaso.org> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
from __future__ import annotations

# taken and adapted from django-cms
# Copyright (c) 2008, Batiste Bieler

"this is the locale selecting middleware that will look at accept headers"

import re

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin

from fluo.settings import NO_LOCALE_PATTERNS

# Rewrites <a href="/..."> targets that do not already start with a language
# prefix, the MEDIA_URL or the STATIC_URL (applied in process_response).
SUB = re.compile(
    r'<a([^>]+)href="/(?!(%s|%s|%s))([^"]*)"([^>]*)>'
    % ("|".join(map(lambda l: l[0] + "/", settings.LANGUAGES)), settings.MEDIA_URL[1:], settings.STATIC_URL[1:]),
)
# Same rewrite for <form action="..."> targets.
SUB2 = re.compile(
    r'<form([^>]+)action="/(?!(%s|%s|%s))([^"]*)"([^>]*)>'
    % ("|".join(map(lambda l: l[0] + "/", settings.LANGUAGES)), settings.MEDIA_URL[1:], settings.STATIC_URL[1:]),
)
# code -> display name for every configured language.
SUPPORTED = dict(settings.LANGUAGES)
# Matches a path that begins with one of the configured language codes.
START_SUB = re.compile(r"^/(%s)/(.*)" % "|".join(map(lambda l: l[0], settings.LANGUAGES)))
# Paths that must never be language-prefixed or content-rewritten.
NO_LOCALE_SUB = re.compile(r"^(%s|%s)(.*)" % ("|".join(NO_LOCALE_PATTERNS), settings.STATIC_URL))
LANGUAGE_COOKIE_NAME = settings.LANGUAGE_COOKIE_NAME


def has_lang_prefix(path):
    # Return the language code that prefixes `path`, or False when there is none.
    check = START_SUB.match(path)
    if check is not None:
        return check.group(1)
    else:
        return False


def skip_translation(path):
    # Return the matching "no locale" prefix for `path`, or False.
    check = NO_LOCALE_SUB.match(path)
    if check is not None:
        return check.group(1)
    else:
        return False


def get_default_language(language_code=None):
    """
    Returns default language depending on settings.LANGUAGE_CODE merged with
    best match from settings.LANGUAGES

    Returns: language_code

    Raises ImproperlyConfigured if no match found
    """
    if not language_code:
        language_code = settings.LANGUAGE_CODE
    languages = dict(settings.LANGUAGES).keys()

    # first try if there is an exact language
    if language_code in languages:
        return language_code

    # otherwise split the language code if possible, so iso3
    language_code = language_code.split("-")[0]

    if language_code not in languages:
        raise ImproperlyConfigured("No match in LANGUAGES for LANGUAGE_CODE %s" % settings.LANGUAGE_CODE)

    return language_code


def get_language_from_request(request):
    # Resolve a language code from, in order: an explicit ?language=/POST
    # field, the request's LANGUAGE_CODE attribute, then the project default.
    # Unsupported values at each step are discarded.
    language = request.GET.get("language", request.POST.get("language", None))
    if language:
        if language not in dict(settings.LANGUAGES).keys():
            language = None
    if language is None:
        language = getattr(request, "LANGUAGE_CODE", None)
    if language:
        if language not in dict(settings.LANGUAGES).keys():
            language = None
    if language is None:
        language = get_default_language()
    return language


class LocaleMiddleware(MiddlewareMixin):
    # Forces every URL to carry a /<lang>/ prefix, strips it before view
    # resolution, and rewrites links/forms in HTML responses accordingly.

    def get_language_from_request(self, request):
        # Determine the active language: a URL prefix wins (and is persisted
        # to the session or a cookie), then session/cookie, then the
        # Accept-Language negotiation, normalised by get_default_language().
        changed = False
        prefix = has_lang_prefix(request.path_info)
        if prefix:
            # Strip the language segment so URL resolution sees the bare path.
            request.path = "/" + "/".join(request.path.split("/")[2:])
            request.path_info = "/" + "/".join(request.path_info.split("/")[2:])
            t = prefix
            if t in SUPPORTED:
                lang = t
                if hasattr(request, "session"):
                    request.session["django_language"] = lang
                else:
                    request.set_cookie(LANGUAGE_COOKIE_NAME, lang)
                changed = True
        else:
            lang = translation.get_language_from_request(request)
        if not changed:
            if hasattr(request, "session"):
                lang = request.session.get("django_language", None)
                if lang in SUPPORTED and lang is not None:
                    return lang
            elif LANGUAGE_COOKIE_NAME in request.COOKIES.keys():
                lang = request.COOKIES.get(LANGUAGE_COOKIE_NAME, None)
                if lang in SUPPORTED and lang is not None:
                    return lang
            if not lang:
                lang = translation.get_language_from_request(request)
        lang = get_default_language(lang)
        return lang

    def process_request(self, request):
        path = str(request.path)
        if skip_translation(path):
            return
        prefix = has_lang_prefix(request.path_info)
        # No language prefix yet: redirect to the default-language URL.
        if not prefix:
            return HttpResponseRedirect("/%s%s" % (settings.LANGUAGE_CODE[:2], request.get_full_path()))
        language = self.get_language_from_request(request)
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()

    def process_response(self, request, response):
        path = str(request.path)
        if skip_translation(path):
            return response
        patch_vary_headers(response, ["Accept-Language"])
        translation.deactivate()
        # NOTE(review): response._headers is a private attribute removed in
        # newer Django releases -- confirm the Django version pin before reuse.
        if (
            not skip_translation(path)
            and response.status_code == 200
            and response._headers["content-type"][1].split(";")[0] == "text/html"
        ):
            # Inject the active language prefix into unprefixed links/forms.
            response.content = SUB.sub(
                r'<a\1href="/%s/\3"\4>' % request.LANGUAGE_CODE,
                response.content.decode("utf-8"),
            )
            response.content = SUB2.sub(
                r'<form\1action="/%s/\3"\4>' % request.LANGUAGE_CODE,
                response.content.decode("utf-8"),
            )
        if response.status_code == 301 or response.status_code == 302:
            if "Content-Language" not in response:
                response["Content-Language"] = translation.get_language()
            # Keep redirect targets inside the active language tree.
            location = response._headers["location"]
            prefix = has_lang_prefix(location[1])
            if not prefix and location[1].startswith("/") and not skip_translation(location[1]):
                response._headers["location"] = (
                    location[0],
                    "/%s%s" % (request.LANGUAGE_CODE, location[1]),
                )
        return response
[ "raffaele@salmaso.org" ]
raffaele@salmaso.org
37ba7f3e9c1e421d8f3ae6e4fce5cdd0f88f331e
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_157/923.py
9d23b8f1fd1c482d24b4c5e1908d21f090e72531
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,051
py
# Code Jam solver (quaternion string problem): decide whether the X-times
# repeated quaternion string can be split into three parts whose products
# are i, j and k respectively.
# NOTE(review): this is Python 2 code -- it relies on `xrange` and on
# `len(lines)/2` being integer division.
import copy

# Multiplication table for distinct basis quaternions (i*i etc. handled in mul_q).
d = {('i','j'): 'k', ('j','i'): '-k', ('j','k'): 'i', ('k','j'): '-i', ('k','i'): 'j', ('i','k'): '-j'}


def mul_q(q1, q2):
    # Multiply two signed quaternion units given as strings ('i', '-k', '1', ...).
    sign = 1
    # Strip leading minus signs, tracking the overall sign of the product.
    if q1[0] == '-':
        sign *= -1
        q1 = q1[1:]
    if q2[0] == '-':
        sign *= -1
        q2 = q2[1:]
    if q1 == '1':
        ans = q2
    elif q2 == '1':
        ans = q1
    elif q1 == q2:
        # i*i = j*j = k*k = -1
        ans = '-1'
    else:
        ans = d[(q1, q2)]
    if sign == -1:
        # Fold the accumulated sign into the result string.
        if ans[0] == '-':
            return ans[1:]
        else:
            return '-' + ans
    else:
        return ans


def prod_string(s):
    # Product of all unit quaternions in string s, left to right.
    qs = list(s)
    acc = '1'
    for q in s:
        acc = mul_q(acc, q)
    return acc


def exp_q(q, pow):
    # q raised to `pow`; unit-quaternion powers cycle with period 4.
    pow = pow % 4
    if pow == 0:
        return '1'
    acc = '1'
    for i in range(pow):
        acc = mul_q(acc, q)
    return acc


def solve_case(case):
    # case = (X, s): the string is s repeated X times.  Answer is YES iff the
    # total product is -1 AND a prefix evaluating to i and a following segment
    # evaluating to j exist (the remainder is then forced to be k).
    pow, s = case
    if exp_q(prod_string(s), pow) != '-1':
        return 'NO'
    # A prefix with product i must appear within the first 4 repetitions
    # (quaternion powers cycle with period 4).
    pow_i = min(pow, 4)
    ans_i = starts_i(s * pow_i)
    if ans_i:
        remaining_chars = ans_i[1]
    else:
        return 'NO'
    # Likewise the j-segment needs at most 4 further repetitions.
    pow_j = min(pow - pow_i, 4)
    ans_j = starts_j(remaining_chars + s * pow_j)
    if ans_j:
        return 'YES'
    else:
        return 'NO'


def starts_i(s):
    # If some prefix of s has product i, return (True, rest_of_string); else False.
    qs = list(s)
    acc = '1'
    chars_used = 0
    for q in s:
        acc = mul_q(acc, q)
        chars_used += 1
        if acc == 'i':
            return (True, ''.join(qs[chars_used:]))
    return False


def starts_j(s):
    # If some prefix of s has product j, return (True, rest_of_string); else False.
    qs = list(s)
    acc = '1'
    chars_used = 0
    for q in s:
        acc = mul_q(acc, q)
        chars_used += 1
        if acc == 'j':
            return (True, ''.join(qs[chars_used:]))
    return False


# Script body: reads every case from c.in and writes answers to c.out.
# Input format: for each case, a header line whose 2nd field is X, then the string.
f = open('c.in', 'r')
lines = f.readlines()
cases = [(int(lines[2*i - 1].split()[1]), lines[2*i].strip()) for i in xrange(1, 1 + len(lines)/2)]
print(len(cases))
print(cases)
g = open('c.out','w')
for i in xrange(len(cases)):
    g.write('Case #' + str(i + 1) + ': ' + str(solve_case(cases[i])) + '\n')
g.close()
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
1720eccbd3a5a5d87381eb2743badc0aabfe950c
2bd385ec885cc67617524d4cc251a33a7fac95a1
/models/eagleedu_syllabus.py
300b71f24090838271ffc1127bcdd072d0714bcf
[]
no_license
development-team-work/eagleedu_core
47eed702c95a4c8ba6625bd516946c56133b98a9
59cbdef95b6092a93ac12ab11bea237da345b3e7
refs/heads/master
2021-01-02T03:13:00.957763
2020-02-10T10:30:11
2020-02-10T10:30:11
239,466,974
0
0
null
2020-02-10T08:55:19
2020-02-10T08:55:18
null
UTF-8
Python
false
false
752
py
# -*- coding: utf-8 -*- from eagle.exceptions import ValidationError from eagle import fields, models, api, _ class EagleeduSyllabus(models.Model): _name = 'eagleedu.syllabus' _description = "Syllabus " _rec_name='syllabus_display' name = fields.Char(string='Name', help="Enter the Name of the Syllabus") # syllabus_code = fields.Char(string='Syllabus Code', compute="_get_code") syllabus_display=fields.Char('Syllabus Display',help="This is printed on the marksheet as Subject") standard_class_id = fields.Many2one('eagleedu.standard_class', string='Class ID') subject_id = fields.Many2one('eagleedu.subject', string='Subject') academic_year = fields.Many2one('eagleedu.academic.year', string='Academic Year')
[ "rapidgrps@princegroup-bd.com" ]
rapidgrps@princegroup-bd.com
3d30699e50d84cb0530efc016de11719e7b37e03
0e1e643e864bcb96cf06f14f4cb559b034e114d0
/Exps_7_v3/doc3d/I_w_M_to_W_focus_Zok_div/ch096/woColorJ/Sob_k25_s001_EroM/pyr_Tcrop255_p20_j15/pyr_1s/L4/step09_1side_L4.py
82aff74ef2922ec79600254bd74c9122ba6bd036
[]
no_license
KongBOy/kong_model2
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
1af20b168ffccf0d5293a393a40a9fa9519410b2
refs/heads/master
2022-10-14T03:09:22.543998
2022-10-06T11:33:42
2022-10-06T11:33:42
242,080,692
3
0
null
null
null
null
UTF-8
Python
false
false
5,656
py
#############################################################################
# Model-configuration module: builds five "pyramid 1side" U-Net model
# builders (depth level 4) for the I_w_M_to_W task, differing only in which
# resolution levels get a conv block.
#############################################################################
### Add kong_model2 to sys.path so the step08/step09 modules can be imported.
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__)            ### path of the currently executing script
code_exe_path_element = code_exe_path.split("\\")     ### split the path to locate kong_model2
kong_layer = code_exe_path_element.index("kong_model2")          ### depth of kong_model2 in the path
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### directory of kong_model2
import sys
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
#############################################################################
from step08_b_use_G_generate_I_w_M_to_Wx_Wy_Wz_combine import I_w_M_to_W
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME

# No colour jitter for this configuration.
color_jit = None
# Generation op: tight crop with 20px padding, resized to 255x255, no jitter.
use_what_gen_op = I_w_M_to_W(separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale=0))
# Training step: same crop but with jitter scale 15.
use_what_train_step = Train_step_I_w_M_to_W(separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale=15), color_jit=color_jit)
use_hid_ch = 96

import time
start_time = time.time()
#############################################################################
### Block1: conv-block masks per level -- 1 enables a conv block at that
### position of the 9-slot pyramid, filled symmetrically from the ends.
pyramid_1side_1 = [1, 0, 0, 0, 0, 0, 0, 0, 1]
pyramid_1side_2 = [1, 1, 0, 0, 0, 0, 0, 1, 1]
pyramid_1side_3 = [1, 1, 1, 0, 0, 0, 1, 1, 1]
pyramid_1side_4 = [1, 1, 1, 1, 0, 1, 1, 1, 1]
pyramid_1side_5 = [1, 1, 1, 1, 1, 1, 1, 1, 1]
#############################################################################
# The five builders below differ only in conv_block_num (the pyramid mask).
ch032_pyramid_1side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1, ch_upper_bound=2 ** 14).set_gen_op(use_what_gen_op).set_train_step(use_what_train_step)
ch032_pyramid_1side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2, ch_upper_bound=2 ** 14).set_gen_op(use_what_gen_op).set_train_step(use_what_train_step)
ch032_pyramid_1side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3, ch_upper_bound=2 ** 14).set_gen_op(use_what_gen_op).set_train_step(use_what_train_step)
ch032_pyramid_1side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4, ch_upper_bound=2 ** 14).set_gen_op(use_what_gen_op).set_train_step(use_what_train_step)
ch032_pyramid_1side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=4, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5, ch_upper_bound=2 ** 14).set_gen_op(use_what_gen_op).set_train_step(use_what_train_step)
#############################################################################
if(__name__ == "__main__"):
    # Smoke test: build one configuration and run a zero tensor through the
    # generator, then dump its weights and summary.
    import numpy as np
    print("build_model cost time:", time.time() - start_time)
    data = np.zeros(shape=(1, 512, 512, 1))
    use_model = ch032_pyramid_1side_4
    use_model = use_model.build()
    result = use_model.generator(data)
    print(result.shape)

    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(use_model.generator)
    use_model.generator.summary()
[ "s89334roy@yahoo.com.tw" ]
s89334roy@yahoo.com.tw
43506dfa09f5bd26109c821562d504b8dc7d7c6d
6e357be547cbeb153c4778e3552716f40a21b007
/문제풀이/최빈수 구하기/최빈수 구하기.py
4cbf3d1271b85b37526f4112add6f9b8fb5b88c7
[]
no_license
hyunsang-ahn/algorithm
db6a8fdf77806f06652a3f4c2e3234b50c1bb717
c3a3c93c452e4b35202529e3209a26fbdc0c2ad7
refs/heads/master
2021-10-22T04:44:09.808795
2019-03-08T06:50:30
2019-03-08T06:50:30
null
0
0
null
null
null
null
UTF-8
Python
false
false
437
py
# SWEA "mode" problem: for each test case print "#<tc> <mode>", where the
# mode is the most frequent value in the list; ties are broken by taking the
# largest such value.
import sys
from collections import Counter

sys.stdin = open('input.txt', 'r')

T = int(input())
for tc in range(1, T + 1):
    n = int(input())  # declared list length; line must be consumed even though unused
    arr = list(map(int, input().split()))
    # Counter replaces the original O(n^2) `arr.count(i)` loop with one O(n) pass.
    counts = Counter(arr)
    top = max(counts.values())
    # Largest value among those that reach the top frequency.
    answer = max(v for v, c in counts.items() if c == top)
    # Same output as the original chained prints: "#<tc> <answer>".
    print("#%d %d" % (tc, answer))
[ "happylovetkd@naver.com" ]
happylovetkd@naver.com
b4ea18da6321d5abc400a5a0d5cc3b4564cb4c65
eff5cd25fa442b70491262bada0584eaaf8add46
/tfx/components/base/executor_spec.py
71518533a6ad04f0c3753e2968b7c9a2eb32c332
[ "Apache-2.0" ]
permissive
fsx950223/tfx
c58e58a85e6de6e9abcb8790acbf36424b5b2029
527fe2bab6e4f62febfe1a2029358fabe55f418c
refs/heads/master
2021-01-04T12:12:51.010090
2020-01-26T04:43:14
2020-01-26T04:43:14
240,543,231
1
0
Apache-2.0
2020-02-14T15:48:12
2020-02-14T15:48:11
null
UTF-8
Python
false
false
2,989
py
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor specifications for defining what to to execute."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
from typing import List, Text, Type

from six import with_metaclass

from tfx.components.base import base_executor
from tfx.utils import json_utils


class ExecutorSpec(with_metaclass(abc.ABCMeta, json_utils.Jsonable)):
  """A specification for a component executor.

  An instance of ExecutorSpec describes the implementation of a component.
  """
  # Abstract marker base: concrete specs are ExecutorClassSpec (in-process
  # Python executor) and ExecutorContainerSpec (container image) below.


class ExecutorClassSpec(ExecutorSpec):
  """A specification of executor class.

  Attributes:
    executor_class: a subclass of base_executor.BaseExecutor used to execute
      this component (required).
  """

  def __init__(self, executor_class: Type[base_executor.BaseExecutor]):
    # executor_class is mandatory; fail fast on a falsy value.
    if not executor_class:
      raise ValueError('executor_class is required')
    self.executor_class = executor_class
    super(ExecutorClassSpec, self).__init__()


class ExecutorContainerSpec(ExecutorSpec):
  """A specifcation of a container.

  The spec includes image, command line entrypoint and arguments for a
  container. For example:

  spec = ExecutorContainerSpec(
    image='docker/whalesay',
    command=['cowsay'],
    args=['hello wolrd'])

  Attributes:
    image: Container image that has executor application. Assumption is that
      this container image is separately release-managed, and tagged/versioned
      accordingly.
    command: Container entrypoint array. Not executed within a shell. The
      docker image's ENTRYPOINT is used if this is not provided. The Jinja
      templating mechanism is used for constructing a user-specified
      command-line invocation based on input and output metadata at runtime.
    args: Arguments to the container entrypoint. The docker image's CMD is used
      if this is not provided. The Jinja templating mechanism is used for
      constructing a user-specified command-line invocation based on input and
      output metadata at runtime.
  """

  def __init__(self,
               image: Text,
               command: List[Text] = None,
               args: List[Text] = None):
    # Only the image is mandatory; command/args fall back to the image's
    # ENTRYPOINT/CMD when left as None.
    if not image:
      raise ValueError('image cannot be None or empty.')
    self.image = image
    self.command = command
    self.args = args
    super(ExecutorContainerSpec, self).__init__()
[ "tensorflow-extended-team@google.com" ]
tensorflow-extended-team@google.com
bdbdec718b149a290f3085cd6eb64c14f9d426c8
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_014/ch58_2020_04_21_19_49_08_495191.py
1d881d5e8a3270339654288032b617d7b21bba59
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
168
py
def conta_a(s):
    """Return the number of occurrences of the character 'a' in s.

    Replaces the original index-based while loop with the idiomatic
    str.count, which scans the string in C.
    """
    return s.count('a')
[ "you@example.com" ]
you@example.com
e6b2f84f0083b67ae2d203f72298316f4eff734e
44b389338c12b0dc2018d8022031b58090c58a63
/ProjectEuler/Problem034.py
410d4e54f8c5ce48b11c1c93332c1f0b88526652
[]
no_license
llcawthorne/old-python-learning-play
cbe71b414d6fafacec7bad681b91976648b230d3
5241613a5536cd5c086ec56acbc9d825935ab292
refs/heads/master
2016-09-05T17:47:47.985814
2015-07-13T01:25:44
2015-07-13T01:25:44
38,983,419
0
0
null
null
null
null
UTF-8
Python
false
false
605
py
#!/usr/bin/env python3
"""Project Euler Problem 034

145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.

Find the sum of all numbers which are equal to the sum of the factorial of
their digits.

Note: as 1! = 1 and 2! = 2 are not sums they are not included.
"""
import math

# Precompute 0!..9! once; the original recomputed math.factorial per digit.
_DIGIT_FACTORIALS = [math.factorial(d) for d in range(10)]

# 9! * 8 is only a 7 digit number, so no curious number can exceed 7 * 9!.
_UPPER_BOUND = math.factorial(9) * 7


def digit_factorial_sum(x):
    """Return the sum of the factorials of the decimal digits of x."""
    return sum(_DIGIT_FACTORIALS[int(d)] for d in str(x))


def find_curious_numbers(limit=_UPPER_BOUND):
    """Return all x in [10, limit) equal to their digit-factorial sum.

    Single-digit numbers are excluded because 1! and 2! are not sums.
    """
    return [x for x in range(10, limit) if digit_factorial_sum(x) == x]


if __name__ == '__main__':
    curious = find_curious_numbers()
    print('The curious numbers sum to', sum(curious))
[ "LLC@acm.org" ]
LLC@acm.org
77216f0de3ae8de629843bafb7defbf5cc5f8d29
200ec10b652f9c504728890f6ed7d20d07fbacae
/forms.py
0becace3be7e76cd20e14cf95376646cfe47f020
[]
no_license
Ks-Ksenia/flask_shop
f4edc17669c29ae02a89e836c3c48230147ae84f
9eb44fd22bf99913c9824ea35e3922cb14ef2451
refs/heads/master
2023-03-01T13:55:20.749127
2021-02-14T09:29:04
2021-02-14T09:29:04
338,767,599
0
0
null
null
null
null
UTF-8
Python
false
false
3,349
py
# WTForms definitions for the flask_shop app: login, registration, order
# checkout and catalogue sorting/filtering.  All user-facing labels and
# validation messages are in Russian and must stay as-is.
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, \
    SubmitField, SelectField, TextAreaField, FormField, FieldList
from wtforms.validators import Email, DataRequired, Length, EqualTo, required
from wtforms.fields.html5 import TelField, EmailField
from wtforms.widgets import CheckboxInput


class LoginForm(FlaskForm):
    """Sign-in form: email + password with an optional "remember me" flag."""
    email = StringField('Email', validators=[Email('Некорректный email')])
    # Password length is validated between 5 and 100 characters.
    password = PasswordField('Пароль', validators=[DataRequired(), Length(min=5, max=100, message='Пароль должен быть от 5 до 100 символов')])
    remember = BooleanField('Запомнить меня', default=False)
    submit = SubmitField('Войти')


class RegistrationForm(FlaskForm):
    """Sign-up form; password2 must match password1 (EqualTo validator)."""
    username = StringField('Имя', validators=[DataRequired(), Length(max=100, message='Имя не должно превышать 100 символов')])
    email = StringField('Email', validators=[Email('Некорректный email')])
    password1 = PasswordField('Пароль', validators=[DataRequired(), Length(min=5, max=100, message='Пароль должен быть от 5 до 100 символов')])
    password2 = PasswordField('Повторите пароль', validators=[DataRequired(), Length(min=5, max=100, message='Пароль должен быть от 5 до 100 символов'), EqualTo('password1', message='Пароли не совпадают')])
    submit = SubmitField('Зарегистрироваться')


class OrderForm(FlaskForm):
    """Checkout form: contact details plus pickup-or-delivery selection."""
    # (value, label) pairs: pickup / delivery.
    CHOICE = [('Самовывоз', 'Самовывоз'), ('Доставка', 'Доставка')]
    first_name = StringField('Имя', validators=[DataRequired(), Length(max=100, message='Максимальная длинна имени 100 символов')])
    last_name = StringField('Фамилия', validators=[DataRequired(), Length(max=100, message='Максимальная длинна фамилии 100 символов')])
    email = EmailField('Email', validators=[Email('Некорректный email')])
    phone = TelField('Телефон', validators=[DataRequired(), Length(max=12, message='Максимальная длина номера телефона 12 символов')])
    delivery = SelectField('Доставка', choices=CHOICE)
    # Free-text delivery address; only meaningful when delivery is selected.
    address = TextAreaField('Адрес доставки')
    submit = SubmitField('Подтвердить заказ')


class SortForm(FlaskForm):
    """Catalogue controls: sort order, price range filter, in-stock flag."""
    # Sort keys: default (id), ascending price, descending price.
    SORT = [('id', 'умолчанию'), ('price', 'возрастанию цены'), ('-price', 'убыванию цены')]
    sort = SelectField('Сортировать по:', choices=SORT)
    min_price = StringField('Цена от')
    max_price = StringField('Цена до')
    exist = BooleanField('В наличие')
    submit = SubmitField('Применить')
[ "demag74@mail.ru" ]
demag74@mail.ru
9f07b90ac5d6626a6da98f349aa934ab9f7f771f
e2e1732b6eb1a7a6dfeba76762851ad06eb8e482
/wangban/wangban/spiders/redisspider.py
eabd738eb7841186dc293026c518f1ae4a617c48
[]
no_license
nightqiuhua/bigCrawlers
551e80d55df492c89ae0e0e0bd70c0e5f873068d
19b86130c8af057d06014865d150e3d2ed6cc319
refs/heads/main
2023-03-23T01:13:26.021850
2021-03-03T15:09:28
2021-03-03T15:09:28
344,165,235
0
0
null
null
null
null
UTF-8
Python
false
false
4,206
py
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from wangban_utils.redis_util import get_redis_conn
from scrapy.spiders import Spider
from items import HangzhouItem
import socket
from datetime import datetime
import os
from urllib.parse import urljoin
import json
import time
from wangban_utils.mongo_mysql_util import MongoDB_To_MySQL
from modify_func import all_modify_func
from wangban_utils.mongo_util import MongodbClass
from scrapy.utils.project import get_project_settings
from . import workers

SETTINGS = get_project_settings()


class RedisSpider(Spider):
    """Dispatcher spider: pops crawl tasks from a Redis list and delegates
    request building and field extraction to per-site worker spiders
    (looked up by name in ``self.workers``)."""

    name = 'redisspider'

    def __init__(self):
        super().__init__()
        self.redis_conn = get_redis_conn()
        #self.to_mysql = MongoDB_To_MySQL(self.name)
        # Max number of tasks popped from Redis per scheduling round.
        self.redis_batch_size = 100
        # Redis list names for pending work and for tasks awaiting re-check.
        self.work_queue = SETTINGS['URLS_WORK_TASKS']
        self.check_queue = SETTINGS['URLS_CHECK_TASKS']
        #self.sche_updator = UpdateFilterClass(self.name)
        self.pre_suf = None
        # NOTE(review): `workers` is imported from the package; presumably it
        # is a mapping of worker name -> worker spider class — confirm, since
        # dict(workers) would fail on a plain module object.
        self.workers = dict(workers)

    def start_requests(self):
        # Seed the crawl entirely from the Redis work queue.
        return self.next_requests()

    def schedule_to_works(self):
        # Move up to one batch of tasks from the check queue back onto the
        # work queue (currently unused — see the commented call below).
        found = 0
        while found < self.redis_batch_size:
            data = self.redis_conn.lpop(self.check_queue)
            if not data:
                break
            self.redis_conn.rpush(self.work_queue,data)
            found +=1

    @classmethod
    def from_crawler(cls,crawler,*args,**kwargs):
        # Hook Scrapy lifecycle signals so the spider keeps polling Redis
        # instead of closing when it runs out of requests.
        spider = super().from_crawler(crawler,*args,**kwargs)
        crawler.signals.connect(spider._spider_opened,signal=signals.spider_opened)
        crawler.signals.connect(spider._spider_idle,signal=signals.spider_idle)
        #crawler.signals.connect(spider._spider_closed,signal=signals.spider_closed)
        return spider

    def _spider_opened(self,spider):
        pass

    def _spider_idle(self,spider):
        # On idle, pull another batch from Redis and refuse to close, so the
        # spider runs as a long-lived consumer.
        self.schedule_next_requests()
        raise DontCloseSpider

    def schedule_next_requests(self):
        # Feed freshly built requests straight into the running engine.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    # worker_spider is the site-specific working spider (e.g. the Anji one).
    def next_requests(self):
        """Pop up to one batch of JSON task dicts from the work queue and
        yield a Request built by the matching worker spider for each."""
        fetch_one = self.redis_conn.lpop
        found = 0
        #self.schedule_to_works()
        while found < self.redis_batch_size:
            data = fetch_one(self.work_queue)
            if not data:
                break
            links_dict = json.loads(data.decode('utf-8'))
            worker_spider = self.workers[links_dict['name']]()
            # Pick the worker_spider by the task's 'name'; self.workers holds
            # all available worker spider classes.
            yield worker_spider.generate_request(links_dict=links_dict,spider= self)
            found += 1

    def parse(self,response):
        """Build a HangzhouItem from the response, delegating field parsing
        to the worker spider named in response.meta['name']."""
        worker_spider = self.workers[response.meta['name']]()
        items = HangzhouItem()
        #print('response.url',response.url)
        try:
            items['url'] = response.url
            items['project'] = 'hangzhou'
            items['spider'] = worker_spider.name
            items['server'] = socket.gethostname()
            items['crawling_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            #primary fields
            items['source_website'] = worker_spider.source_website
            items['website_area'] = worker_spider.specific_area
            items['specific_area'] = response.meta['an_county']
            items['an_type'] = response.meta['an_type']
            items['an_major'] = response.meta['an_major']
            items['an_sub'] = response.meta['an_sub']
            # NOTE(review): project_title and an_title are both filled from
            # an_title_parse — confirm this duplication is intentional.
            items['project_title']=worker_spider.an_title_parse(response)
            items['on_date'] = worker_spider.an_on_date_parse(response)
            items['an_title'] = worker_spider.an_title_parse(response)
            items['an_url'] = worker_spider.final_url(response)
            items['an_refer_url'] = response.meta['an_refer_url']
            items['crawling_number'] = '1'
            items['an_content'] = worker_spider.an_content(response)
            items['code'] = 'NONE'
        except Exception as e:
            # Best-effort parse: log the failing URL/exception and fall
            # through (returns None so the item is dropped).
            print('parse error',response.url)
            print('parse error',e)
        else:
            return items
[ "1320551630@qq.com" ]
1320551630@qq.com
8e0110d97e1eef72cc626068e9dc37da471245a5
3b2940c38412e5216527e35093396470060cca2f
/top/api/rest/TmallTraderateFeedsGetRequest.py
5b6fa08b6e0e7bdd1d538ab76409fcdd3e1390ad
[]
no_license
akingthink/goods
842eb09daddc2611868b01ebd6e330e5dd7d50be
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
refs/heads/master
2021-01-10T14:22:54.061570
2016-03-04T09:48:24
2016-03-04T09:48:24
45,093,302
0
0
null
null
null
null
UTF-8
Python
false
false
325
py
''' Created by auto_sdk on 2015-01-20 12:44:31 ''' from top.api.base import RestApi class TmallTraderateFeedsGetRequest(RestApi): def __init__(self,domain='gw.api.taobao.com',port=80): RestApi.__init__(self,domain, port) self.child_trade_id = None def getapiname(self): return 'tmall.traderate.feeds.get'
[ "yangwenjin@T4F-MBP-17.local" ]
yangwenjin@T4F-MBP-17.local
b65a444228c179437b655748fdf4aa97e4c9f16b
86f8bf3933208329eb73bfcba5e1318dbb2ddafa
/hello_world/django/benckmark/settings.py
59dcfe2806efd190dafb6247412c676e1aacfc40
[]
no_license
TakesxiSximada/benchmarks
42ce5466c813e45db78f87ca391806fbb845a16c
9cd2fc732ed006fd3554e01b1fc71bfcb3ada312
refs/heads/master
2021-01-15T23:02:14.063157
2015-05-30T18:52:08
2015-05-30T18:52:08
36,551,678
0
0
null
null
null
null
UTF-8
Python
false
false
2,237
py
"""
Django settings for benckmark project.

Generated by 'django-admin startproject' using Django 1.8.dev20141227141312.

For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@d0)=*&5(ld#322i*2h#0t#!%d(c01t_eg!*nqla9m2qid%$4_'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): TEMPLATE_DEBUG is True while DEBUG is False — Django only
# honours TEMPLATE_DEBUG when DEBUG is True; confirm this is intentional
# for the benchmark.
TEMPLATE_DEBUG = True

# Local-only hosts: this settings file targets a localhost benchmark run.
ALLOWED_HOSTS = [
    'localhost',
    '127.0.0.1',
]


# Application definition

INSTALLED_APPS = (
    # 'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'benckmark.urls'

WSGI_APPLICATION = 'benckmark.wsgi.application'


# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/

STATIC_URL = '/static/'
[ "takesxi.sximada@gmail.com" ]
takesxi.sximada@gmail.com
eeee37bc3e3fdfee9d00d5b5551c681844a5fc81
65c539e235155b15946cbc7f8838bf69f56086c0
/learn_torch/mini_cnn.py
31436782fa363b25b8c0d230125bba80acf52950
[]
no_license
xkcomeon/any-whim
08a070a8ae8d795cb76e77f0f0f61edea7e5d60e
ce7160686d3689fbd4350420d1f130d7cce5c2c4
refs/heads/master
2023-03-20T08:16:43.446082
2021-03-14T14:48:02
2021-03-14T14:48:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,614
py
# CNN for single Chinese-character recognition.
# Tries to cope with rotation/scaling variations in the classification task.
import os
import cv2
import numpy as np


# Read the single-character image files from a directory.
def read_imginfos(file):
    """Load 40x40 RGB [c,x,y] images from `file`; the first character of
    each file name is taken as the class label.  Returns (imginfos,
    class_types) where class_types maps label -> index."""
    # The first Chinese character of an image file name denotes its class.
    class_types = set()
    imginfos = []
    for i in os.listdir(file):
        if i.endswith('.jpg') or i.endswith('.png'):
            class_types.add(i[0])
    for i in os.listdir(file):
        if i.endswith('.jpg') or i.endswith('.png'):
            fil = os.path.join(file, i)
            # np.fromfile + imdecode handles non-ASCII file paths on Windows.
            img = cv2.imdecode(np.fromfile(fil, dtype=np.uint8), 1)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # [y,x,c]
            img = cv2.resize(img, (40, 40))
            img = np.transpose(img, (2,1,0)) # [c,x,y]
            imginfo = {}
            imginfo['class'] = i[0]
            imginfo['img'] = img
            imginfos.append(imginfo)
            # cv2.imshow('test', img)
            # cv2.waitKey(0)
    class_types = {tp: idx for idx, tp in enumerate(sorted(class_types))}
    return imginfos, class_types


# Build the one-hot y_true vector used by the loss computation.
def make_y_true(imginfo, class_types):
    """Return a FloatTensor one-hot vector for imginfo's class label."""
    # NOTE(review): the next two statements are no-ops (unused variable and
    # a discarded lookup) kept verbatim from the original.
    img = imginfo['img']
    class_types.get(imginfo['class'])
    clz = [0.]*len(class_types)
    clz[class_types.get(imginfo['class'])] = 1.
    return torch.FloatTensor(clz)


def load_data(filepath):
    """Read all images under `filepath` and return ([x, y_true] pairs,
    class_types)."""
    imginfos, class_types = read_imginfos(filepath)
    train_data = []
    for imginfo in imginfos:
        train_data.append([torch.FloatTensor(imginfo['img']),
                           make_y_true(imginfo, class_types)])
    return train_data, class_types


import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
from torch.autograd import Variable
from collections import OrderedDict

# Run on GPU when available.
USE_CUDA = True if torch.cuda.is_available() else False
DEVICE = 'cuda' if USE_CUDA else 'cpu'
torch.set_printoptions(precision=2, sci_mode=False, linewidth=120, profile='full')


class MiniCNN(nn.Module):
    """Small conv net: 4 Conv+BN+LeakyReLU stages (3 with max-pooling),
    flattened into a linear layer with one output per class."""

    class ConvBN(nn.Module):
        # Convolution + batch-norm + LeakyReLU building block.
        def __init__(self, cin, cout, kernel_size=3, stride=1, padding=None):
            super().__init__()
            # Default to "same"-style padding for odd kernel sizes.
            padding = (kernel_size - 1) // 2 if not padding else padding
            self.conv = nn.Conv2d(cin, cout, kernel_size, stride, padding, bias=False)
            self.bn = nn.BatchNorm2d(cout, momentum=0.01)
            self.relu = nn.LeakyReLU(0.1, inplace=True)

        def forward(self, x):
            return self.relu(self.bn(self.conv(x)))

    def __init__(self, class_types, inchennel=3):
        super().__init__()
        # Number of output classes.
        self.oceil = len(class_types)
        self.model = nn.Sequential(
            OrderedDict([
                ('ConvBN_0', self.ConvBN(inchennel, 32)),
                ('Pool_0', nn.MaxPool2d(2, 2)),
                ('ConvBN_1', self.ConvBN(32, 64)),
                ('Pool_1', nn.MaxPool2d(2, 2)),
                ('ConvBN_2', self.ConvBN(64, 128)),
                ('Pool_2', nn.MaxPool2d(2, 2)),
                ('ConvBN_3', self.ConvBN(128, 256)),
                ('Flatten', nn.Flatten()),
                # 6400 = 256 channels * 5 * 5 spatial, for 40x40 inputs.
                ('Linear', nn.Linear(6400, self.oceil)),
            ])
        )

    def forward(self, x):
        # Per-class sigmoid scores (paired with an MSE loss below).
        x = torch.sigmoid(self.model(x))
        return x


class miniloss(nn.Module):
    """Sum-reduced MSE loss between sigmoid scores and one-hot targets."""

    def __init__(self, class_types):
        super().__init__()
        self.clazlen = len(class_types)

    def forward(self, pred, targ, callback=None):
        loss = F.mse_loss(pred,targ,reduction='sum')
        # NOTE(review): rebinding the builtin `print` globally to route loss
        # logging through `callback` affects the whole module — fragile.
        global print
        print = callback if callback else print
        print(loss)
        return loss


def train(train_data, class_types):
    """Train MiniCNN with Adam and checkpoint to net.pkl after each epoch."""
    EPOCH = 10
    BATCH_SIZE = 100
    LR = 0.001
    net = MiniCNN(class_types).to(DEVICE)
    mloss = miniloss(class_types).to(DEVICE)
    optimizer = torch.optim.Adam(net.parameters(), lr=LR)
    train_loader = Data.DataLoader(
        dataset=train_data,
        batch_size=BATCH_SIZE,
        shuffle=True,
    )
    for epoch in range(EPOCH):
        print('epoch', epoch)
        for step, (b_x, b_y) in enumerate(train_loader):
            b_x = Variable(b_x).to(DEVICE)
            b_y = Variable(b_y).to(DEVICE)
            loss = mloss(net(b_x), b_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Checkpoint weights + optimizer + label map so inference can restore
        # the exact class ordering.
        state = {'net':net.state_dict(), 'optimizer':optimizer,
                 'epoch':epoch+1, 'class_types':class_types}
        torch.save(state, 'net.pkl')
        print('save.')
    print('end.')


def load_state(filename):
    """Restore a checkpoint and return it with state['net'] replaced by a
    ready-to-use eval-mode MiniCNN on DEVICE."""
    state = torch.load(filename)
    class_types = state['class_types']
    net = MiniCNN(class_types)
    net.load_state_dict(state['net'])
    net.to(DEVICE)
    net.eval()
    state['net'] = net
    return state


def test(filename, state):
    """Classify a single image file; prints the scores and returns the
    predicted character."""
    net = state['net'].to(DEVICE)
    class_types = state['class_types']
    img = cv2.imdecode(np.fromfile(filename, dtype=np.uint8), 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # [y,x,c]
    img = cv2.resize(img, (40, 40))
    img = np.transpose(img, (2,1,0)) # [c,x,y]
    x = torch.FloatTensor(img).unsqueeze(0).to(DEVICE)
    v = net(x)
    if USE_CUDA:
        v = v.cpu().detach().numpy()
    else:
        v = v.detach().numpy()
    v = v[0].tolist()
    # argmax over scores, mapped back through the sorted label order used
    # when class_types was built.
    r = sorted(class_types)[v.index(max(v))]
    print(v)
    print(r)
    return r


# train_data, class_types = load_data('./train_img')
# train(train_data, class_types)

print('loading model.')
state = load_state('net.pkl')
print('loading model. ok.')
test('./train_img/你_00_30_(255, 255, 255)_(0, 0, 255)_simsun.ttc.jpg', state)
[ "opaquism@hotmail.com" ]
opaquism@hotmail.com
844498d5a39b662dec59c8f2370751615f418417
3365e4d4fc67bbefe4e8c755af289c535437c6f4
/.history/src/core/dialogs/waterfall_dialog_20170814144141.py
dbaa69a0f387bec839f238e48d9d55b0e64da868
[]
no_license
kiranhegde/OncoPlotter
f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1
b79ac6aa9c6c2ca8173bc8992ba3230aa3880636
refs/heads/master
2021-05-21T16:23:45.087035
2017-09-07T01:13:16
2017-09-07T01:13:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,426
py
'''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''

from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QApplication, QDialog, QWidget, QPushButton,
                             QVBoxLayout, QTreeWidget, QTreeWidgetItem)
from PyQt5 import QtCore, QtGui

import core.gui.waterfall as waterfall
import numpy as np
from pprint import pprint


class Waterfall(QWidget, waterfall.Ui_Waterfall):
    """Settings/side panel for the waterfall plot: collects general plot
    options and shows the generated bar data in a tree view."""

    general_settings_signal = QtCore.pyqtSignal(list)  # send list of plotting params
    updated_rectangles_signal = QtCore.pyqtSignal(list)  # send list of updated artists for redrawing

    def __init__(self, parent):
        super(Waterfall, self).__init__(parent)
        self.setupUi(self)

        # Button functions
        self.btn_apply_general_settings.clicked.connect(self.send_settings)
        self.patient_tree = self.create_patient_tree()
        self.data_viewer_container.addWidget(self.patient_tree)

    def on_waterfall_data_signal(self, signal):
        # Pandas dataframe with the per-patient waterfall data.
        self.waterfall_data = signal['waterfall_data']

    def on_generated_rectangles_signal(self, signal):
        # signal[0] is the list of matplotlib Rectangle artists.
        self.rectangles_received = signal[0]
        self.add_items(self.rectangles_received)  # display in table
        #print(self.rectangles_received)

    def send_settings(self, signal):
        """Emit the current general plot settings as a flat list (title,
        axis labels, reference-line toggles, response-text toggle)."""
        self.list_general_settings = [
            self.plot_title.text(),
            self.x_label.text(),
            self.y_label.text(),
            self.twenty_percent_line.isChecked(),
            self.thirty_percent_line.isChecked(),
            self.zero_percent_line.isChecked(),
            self.display_responses_as_text.isChecked()
        ]
        self.general_settings_signal.emit(self.list_general_settings)

    def create_patient_tree(self):
        '''
        Create QTreeWidget populated with a patient's data for the DataEntry
        dialog. Assumes that self.temp_patient is the patient of interest
        and that the variable belongs to the dialog.
        '''
        self.tree = QTreeWidget()
        self.root = self.tree.invisibleRootItem()
        self.headers = [
            'Patient #',
            'Best response %',
            'Overall response',
            'Cancer type'
        ]
        self.headers_item = QTreeWidgetItem(self.headers)
        self.tree.setColumnCount(len(self.headers))
        self.tree.setHeaderItem(self.headers_item)
        self.root.setExpanded(True)
        #self.addItems()
        #self.tree.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
        #self.tree.header().setStretchLastSection(False)
        return self.tree

    def add_items(self, rectangles=None):
        '''
        Populate the viewing tree from the generated bar rectangles.

        Bug fix: the original signature `add_items(self,)` accepted no
        argument, yet on_generated_rectangles_signal called it with the
        rectangle list, raising a TypeError.  The argument is now accepted
        (optional, defaulting to self.rectangles_received for backward
        compatibility).
        '''
        if rectangles is None:
            rectangles = self.rectangles_received
        for rect in rectangles:
            # populate editable tree with rect data
            #column = 0
            #self.rect_item = QTreeWidgetItem(self.root)
            #self.rect_params = [rect.get_label]
            print(rect.get_label())


class WaterfallPlotter(QWidget):
    """Matplotlib canvas widget that renders the waterfall bar chart."""

    generated_rectangles_signal = QtCore.pyqtSignal(list)  # send list of rects for data display in tree

    def __init__(self, parent):
        super(WaterfallPlotter, self).__init__(parent)
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)

        self.btn_plot = QPushButton('Default Plot')
        self.btn_plot.clicked.connect(self.default_plot)

        self.layout = QVBoxLayout()
        self.layout.addWidget(self.toolbar)
        self.layout.addWidget(self.canvas)
        self.layout.addWidget(self.btn_plot)
        self.setLayout(self.layout)

    def on_waterfall_data_signal(self, signal):
        # Pandas dataframe with the per-patient waterfall data.
        self.waterfall_data = signal['waterfall_data']
        self.btn_plot.setEnabled(True)

    def on_general_settings_signal(self, signal):
        # Apply title/axis-label settings; silently skipped until the first
        # plot has created self.ax.
        try:
            # NOTE(review): hasattr() never raises — the try/except actually
            # guards the self.ax attribute access on the next line.
            hasattr(self, 'ax')
            self.ax.set_title(signal[0])
            self.ax.set_xlabel(signal[1])
            self.ax.set_ylabel(signal[2])
            self.canvas.draw()
        except Exception as e:
            print(e)

    def default_plot(self):
        '''
        Plot waterfall data with the default styling (reference lines at
        +20%/-30%/0% and per-bar response labels).
        '''
        self.figure.clear()
        self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
        self.ax = self.figure.add_subplot(111)
        self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
        self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
        self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
        self.ax.grid(color='k', axis='y', alpha=0.25)
        self.rects = self.ax.bar(self.rect_locations,
                                 self.waterfall_data['Best response percent change'],
                                 label=self.waterfall_data['Patient number'])
        self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
        #self.plot_table()
        self.canvas.draw()
        # NOTE(review): Axes.hold() was removed in matplotlib 2.0 — this line
        # only works on very old matplotlib; confirm the pinned version.
        self.ax.hold(False)  # rewrite the plot when plot() called
        self.generated_rectangles_signal.emit([self.rects])

    def plot_table(self):
        """Render the extra per-patient fields as a table under the plot."""
        rows = ['%s' % x for x in self.waterfall_data.keys()]
        rows = rows[4:]  # skip first four standard headers; the rest are table rows
        columns = self.waterfall_data['Patient number']  # patient numbers
        cell_text = []
        for row in rows:
            cell_text_temp = []
            for col in range(len(columns)):
                cell_text_temp.append(self.waterfall_data[row][col])
            cell_text.append(cell_text_temp)
        the_table = plt.table(cellText=cell_text, rowLabels=rows,
                              colLabels=columns, loc='bottom', cellLoc='center')
        plt.subplots_adjust(bottom=0.15, left=0.5)
        self.ax.set_xlim(-0.5, len(columns) - 0.5)
        plt.tick_params(
            axis='x',          # changes apply to the x-axis
            which='both',      # both major and minor ticks are affected
            bottom='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelbottom='off'  # labels along the bottom edge are off
        )

    def update_plot(self):
        '''
        TODO
        '''
        pass

    def auto_label_responses(self, ax, rects, waterfall_data):
        '''Add labels above/below bars'''
        i = 0
        for rect in rects:
            height = rect.get_height()
            if height >= 0:
                valign = 'bottom'
            else:
                valign = 'top'
            ax.text(rect.get_x() + rect.get_width() / 2., height,
                    '%s' % waterfall_data['Overall response'][i],
                    ha='center', va=valign)
            i += 1
[ "ngoyal95@terpmail.umd.edu" ]
ngoyal95@terpmail.umd.edu
49ede39dee06aaa502ae7e4507c81c91e49f634c
dbbdf35bff726681ae34ad08eeda5f30929e2ae9
/supervised_learning/0x00-binary_classification/8-neural_network.py
6f3dce07188aef74a022e7625d340e1657f4161c
[]
no_license
jorgezafra94/holbertonschool-machine_learning
0b7f61c954e5d64b1f91ec14c261527712243e98
8ad4c2594ff78b345dbd92e9d54d2a143ac4071a
refs/heads/master
2023-02-03T20:19:36.544390
2020-12-21T21:49:10
2020-12-21T21:49:10
255,323,504
1
1
null
null
null
null
UTF-8
Python
false
false
1,439
py
#!/usr/bin/env python3 """ neural network with one hidden layer performing binary classification: """ import numpy as np class NeuralNetwork(): """ Here is my first Holberton NeuralNetwork Class here we are going to use One hidden layer the main things to keep in mind about a neuron is the ecuation y = sum(w.x) + b where w are the weights in this case W x are the inputs in this case nx b are the biases A is the activated output of the neuron """ def __init__(self, nx, nodes): """ constructor of class nx is the number of input features to the neuron nodes is the number of nodes found in the hidden layer """ if type(nx) is not int: raise TypeError("nx must be an integer") if nx < 1: raise ValueError("nx must be a positive integer") if type(nodes) is not int: raise TypeError("nodes must be an integer") if nodes < 1: raise ValueError("nodes must be a positive integer") # input layer parameters self.W1 = np.random.randn(nx, nodes) self.W1 = self.W1.reshape(nodes, nx) self.b1 = np.zeros(nodes) self.b1 = self.b1.reshape(nodes, 1) self.A1 = 0 # hidden layer parameters self.W2 = np.random.randn(nodes) self.W2 = self.W2.reshape(1, nodes) self.b2 = 0 self.A2 = 0
[ "947@holbertonschool.com" ]
947@holbertonschool.com
78d93dc4ea64e77ecc661aa7c3d513d1758e2027
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_109/ch27_2019_08_14_18_40_36_940230.py
4266c60b91cf6eb8413093db272fd574351c5621
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
215
py
def n(a, c): tempo = ((a*365)*c)*10 tempo = (tempo/3600) return tempo a = int(input('Insira há quantos anos você fuma: ')) c = int(input('Insira quantos cigarros você usa por dia: ')) print(n(a, c))
[ "you@example.com" ]
you@example.com
22483da0f47eecf1240954f46e0762ee7f16d6d1
97cb12cc1243ffa1e29c98ed013a03b377d0e9cd
/setup.py
1e3612c418f41668b0949274eda9949bd59dac26
[ "MIT" ]
permissive
brettatoms/flask-appconfig
675097323f06b7dcd40f6e8225991e24a349d4a3
3e023569f156166abe3992bb3abe0fdad1c38630
refs/heads/master
2021-01-18T04:47:44.432958
2015-07-17T23:02:04
2015-07-17T23:02:04
null
0
0
null
null
null
null
UTF-8
Python
false
false
918
py
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from setuptools import setup, find_packages def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name='flask-appconfig', version='0.11.0.dev1', description=('Configures Flask applications in a canonical way. Also auto-' 'configures Heroku. Aims to standardize configuration.'), long_description=read('README.rst'), author='Marc Brinkmann', author_email='git@marcbrinkmann.de', url='http://github.com/mbr/flask-appconfig', license='MIT', packages=find_packages(exclude=['tests']), install_requires=['flask', 'six', 'click'], entry_points={ 'console_scripts': [ 'flask = flask_appconfig.cli:cli', ], }, classifiers=[ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ] )
[ "git@marcbrinkmann.de" ]
git@marcbrinkmann.de
a897a4faf3b4f6073bc4505c1ef6077f10815dac
b4cfb1f9813df98a791c0dfeab5183996f900f13
/core/forms.py
34183f3bf2d149ad1d532f1b606456e800d6fc30
[]
no_license
KiwiState/TestDjango
6fedf22603950bd4345dc6dc93ed7ab67c811769
c09ef5f02cf4297d97393cd3a495475478e023ef
refs/heads/master
2023-06-09T20:26:16.070432
2021-07-08T13:49:05
2021-07-08T13:49:05
377,001,555
0
0
null
null
null
null
UTF-8
Python
false
false
535
py
from django import forms from django.forms import ModelForm from .models import Pintura,Contacto,Usuarios class PinturaForm(ModelForm): class Meta: model = Pintura fields = ['titulo','descripcion','categoria','imagen','id_pintura','id'] class ContactoForm(ModelForm): class Meta: model = Contacto fields = ['email','titulo','descripcion'] class UsuariosForm(ModelForm): class Meta: model = Usuarios fields = ['nombre','correo','sexo','edad','artista','id']
[ "unconfigured@null.spigotmc.org" ]
unconfigured@null.spigotmc.org
f9c00636facae578d804121b09edd4a003c90297
cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98
/leet/amazon/strings_and_arrays/454_4Sum_II.py
39caa53fc72f97bde6109f3bfd501894873bb480
[]
no_license
arsamigullin/problem_solving_python
47715858a394ba9298e04c11f2fe7f5ec0ee443a
59f70dc4466e15df591ba285317e4a1fe808ed60
refs/heads/master
2023-03-04T01:13:51.280001
2023-02-27T18:20:56
2023-02-27T18:20:56
212,953,851
0
0
null
null
null
null
UTF-8
Python
false
false
1,724
py
import collections from typing import List class Solution: def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int: dp = collections.defaultdict(int) for a in A: for b in B: dp[a+b]+=1 cnt = 0 for c in C: for d in D: cnt+= dp[-(c+ d)] return cnt class Solution: def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int: # n = len(A) # sumX={} # sumY={} # for i in range(n): # for j in range(n): # sa = A[i] + B[j] # sb = C[i] + D[j] # if sa not in sumX: # sumX[sa] =0 # sumX[sa] +=1 # if sb not in sumY: # sumY[sb] =0 # sumY[sb] +=1 # total =0 # for k in sumX: # if -k in sumY: # total += sumX[k] * sumY[-k] # return total p, q, r, s = dict(), dict(), dict(), dict() for i, j, k, l in zip(A, B, C, D): p[i] = p.get(i, 0) + 1 q[j] = q.get(j, 0) + 1 r[k] = r.get(k, 0) + 1 s[l] = s.get(l, 0) + 1 sumt = dict() for i in p: for j in q: t = i + j sumt[t] = sumt.get(t, 0) + p[i] * q[j] total = 0 for i in r: for j in s: t = i + j total += sumt.get(-t, 0) * (r[i] * s[j]) return total
[ "ar.smglln@gmail.com" ]
ar.smglln@gmail.com
98ae7611ae59d9ad96d11df5d4be3d5ffffa1b93
798960eb97cd1d46a2837f81fb69d123c05f1164
/symphony/cli/pyinventory/graphql/enum/user_role.py
053133dc005e0a4f2ac8fe4e406670c5ecde4190
[ "BSD-3-Clause", "Apache-2.0" ]
permissive
kyaaqba/magma
36d5fa00ce4f827e6ca5ebd82d97a3d36e5f5b5b
fdb7be22a2076f9a9b158c9670a9af6cad68b85f
refs/heads/master
2023-01-27T12:04:52.393286
2020-08-20T20:23:50
2020-08-20T20:23:50
289,102,268
0
0
NOASSERTION
2020-08-20T20:18:42
2020-08-20T20:18:41
null
UTF-8
Python
false
false
300
py
#!/usr/bin/env python3 # @generated AUTOGENERATED file. Do not Change! from enum import Enum class UserRole(Enum): USER = "USER" ADMIN = "ADMIN" OWNER = "OWNER" MISSING_ENUM = "" @classmethod def _missing_(cls, value: object) -> "UserRole": return cls.MISSING_ENUM
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
e5986fafc4c92267351701bf782014ee5e3e3e90
11757061e3f4d4b6535c311ed5005b620c98d1b5
/lang/test/python/test_scope.py
f2a8fe1b5c8f0a8e2c252d5dee22b1b06f9bb5f3
[ "MIT" ]
permissive
nijeshu/taichi
580176d65cae80c87ab508a7db3f71f4815cbcdc
bd02798208b2d363e605434d10d739fe03e9c07f
refs/heads/master
2020-08-06T15:41:47.510747
2019-10-05T19:12:08
2019-10-05T19:12:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
488
py
import taichi as ti ti.runtime.print_preprocessed = True def test_scope(): # In the future the following code should throw an exception at the python front end # instead of crashing the compiler return for arch in [ti.x86_64, ti.cuda]: # ti.reset() ti.cfg.arch = arch x = ti.var(ti.f32) N = 1 @ti.layout def place(): ti.root.dense(ti.i, N).place(x) @ti.kernel def func(): if 1 > 0: val = 1 ti.print(val) func()
[ "yuanmhu@gmail.com" ]
yuanmhu@gmail.com
dcd595986262113e80ae7f4168553bdf37ea6ab5
03f1a5380641564750daa9c4de90e85e8e1c9c35
/notifier/signal.py
51d23490edad463787e514f13def9a6ded7100a9
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
stockec/siis
f4e4a792a1f6623030a3299f0978b681d7c85d52
82f54415b4ed82ff12d17a252dd6da32bc31a586
refs/heads/master
2020-07-18T11:28:26.411702
2019-09-01T22:01:59
2019-09-01T22:01:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,781
py
# @date 2018-08-07
# @author Frederic SCHERMA
# @license Copyright (c) 2018 Dream Overflow
# service worker


class Signal(object):
    """Read-only message exchanged between services.

    A signal carries its emitter (`source` category + `source_name`), a
    type code (one of the SIGNAL_* constants) and a type-specific `data`
    payload whose layout is documented beside each constant.
    """

    SIGNAL_UNDEFINED = 0

    SIGNAL_SOCIAL_ENTER = 10    # broker copy position entry signal
    SIGNAL_SOCIAL_EXIT = 11     # broker copy position exit signal
    SIGNAL_SOCIAL_UPDATED = 12  # broker copy position exit signal
    SIGNAL_AUTHOR_ADDED = 13
    SIGNAL_AUTHOR_REMOVED = 14

    SIGNAL_STRATEGY_ENTRY_EXIT = 50  # data is a dict {'trader-name', 'trade-id', 'symbol', 'direction', 'price', 'symbol', 'action', 'rate', 'timestamp', ...}

    SIGNAL_CANDLE_DATA = 100       # data is a pair with (market_id, Candle)
    SIGNAL_TICK_DATA = 101         # data is a pair with (market_id, Tick)
    SIGNAL_CANDLE_DATA_BULK = 102  # data is a tuple of (market_id, tf, Candle[])
    SIGNAL_TICK_DATA_BULK = 103    # data is a tuple of (market_id, tf, Tick[])
    SIGNAL_SOCIAL_ORDER = 104      # data is a tuple with (str market id, dict position details)
    SIGNAL_BUY_SELL_ORDER = 105    # data is BuySellSignal
    SIGNAL_ORDER_BOOK = 106        # data is a tuple with (market_id, buys array, sells array)

    SIGNAL_WATCHER_CONNECTED = 200     # data is None
    SIGNAL_WATCHER_DISCONNECTED = 201  # data is None

    SIGNAL_ACCOUNT_DATA = 300      # data is a tuple with (balance, free_margin, pnl, currency, risk_limit)
    SIGNAL_MARKET_DATA = 301       # data is a tuple with (market_id, tradable, timestamp, bid, ofr, base_exchange_rate, contract_size, value_per_pip, vol24h_base, vol24h_quote)
    SIGNAL_MARKET_INFO_DATA = 302  # data is a tuple with (market_id, Market())
    SIGNAL_MARKET_LIST_DATA = 303  # data is an array of tuples of str (market_id, symbol, base, quote)

    SIGNAL_POSITION_OPENED = 400   # data is a (str market id, dict position details, str ref order id)
    SIGNAL_POSITION_UPDATED = 401  # data is a (str market id, dict position details, str ref order id)
    SIGNAL_POSITION_DELETED = 402  # data is a (str market id, str position id, str ref order id)
    SIGNAL_POSITION_AMENDED = 403  # data is a (str market id, dict position details)

    SIGNAL_ORDER_OPENED = 500    # data is a (str market id, dict order details, str ref order id)
    SIGNAL_ORDER_UPDATED = 501   # data is a (str market id, dict order details, str ref order id)
    SIGNAL_ORDER_DELETED = 502   # data is a (str market id, str order id, str ref order id)
    SIGNAL_ORDER_REJECTED = 503  # data is a (str market id, str ref order id)
    SIGNAL_ORDER_CANCELED = 504  # data is a (str market id, str order id, str ref order id)
    SIGNAL_ORDER_TRADED = 505    # data is a (str market id, dict order details, str ref order id)

    SIGNAL_ASSET_DATA = 600       # data is a tuple with (asset_id, asset object)
    SIGNAL_ASSET_DATA_BULK = 601  # data is an array of Asset objects
    SIGNAL_ASSET_UPDATED = 602    # data is a tuple with (asset_id, locked_balance, free_balance)

    SIGNAL_STRATEGY_TRADE_LIST = 700   # data is a an array of tuple with (market_id, integer trade_id, integer trade_type, dict data, dict operations)
    SIGNAL_STRATEGY_TRADER_LIST = 701  # data is a an array of tuple with (market_id, boolean activity, dict data, dict regions)

    # Emitter categories for `source`.
    SOURCE_UNDEFINED = 0
    SOURCE_WATCHER = 1
    SOURCE_TRADER = 2
    SOURCE_STRATEGY = 3
    SOURCE_MONITOR = 4

    def __init__(self, source, source_name, signal_type, data):
        """Build an immutable signal.

        @param source       one of the SOURCE_* constants (emitter category)
        @param source_name  name of the emitting service instance
        @param signal_type  one of the SIGNAL_* constants
        @param data         payload; layout depends on signal_type (see above)
        """
        self._source = source
        self._source_name = source_name
        self._signal_type = signal_type
        self._data = data

    def __repr__(self):
        # Added for debuggability (payload omitted on purpose — it can be
        # large); does not affect the public accessors below.
        return "%s(source=%r, source_name=%r, signal_type=%r)" % (
            type(self).__name__, self._source, self._source_name,
            self._signal_type)

    @property
    def source(self):
        """Emitter category: one of the SOURCE_* constants."""
        return self._source

    @property
    def source_name(self):
        """Name of the emitting service instance."""
        return self._source_name

    @property
    def signal_type(self):
        """One of the SIGNAL_* constants."""
        return self._signal_type

    @property
    def data(self):
        """Type-specific payload (see the SIGNAL_* constant comments)."""
        return self._data
[ "frederic.scherma@gmail.com" ]
frederic.scherma@gmail.com
91b5d980d481c438768b83a4d5b7df79eea6bc96
6189f34eff2831e3e727cd7c5e43bc5b591adffc
/WebMirror/management/rss_parser_funcs/feed_parse_extractRemnancetlCom.py
2f4d11c779dbeaec9f911463a4b61cbe7a01a777
[ "BSD-3-Clause" ]
permissive
fake-name/ReadableWebProxy
24603660b204a9e7965cfdd4a942ff62d7711e27
ca2e086818433abc08c014dd06bfd22d4985ea2a
refs/heads/master
2023-09-04T03:54:50.043051
2023-08-26T16:08:46
2023-08-26T16:08:46
39,611,770
207
20
BSD-3-Clause
2023-09-11T15:48:15
2015-07-24T04:30:43
Python
UTF-8
Python
false
false
541
py
def extractRemnancetlCom(item): ''' Parser for 'remnancetl.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
[ "something@fake-url.com" ]
something@fake-url.com
16f944de1e71ab6bfb26e39281def4a786a69efa
7a704e838d89f942a1099fec141f1fbe9828e528
/v2_plugin/example/maskrcnn-service/engine.py
030d0989e1e8c9dbb2e97a53f39056e2bf88d710
[ "Apache-2.0" ]
permissive
cap-ntu/Video-to-Retail-Platform
3ee00d22b7fd94925adac08c5ea733ee647f4574
757c68d9de0778e3da8bbfa678d89251a6955573
refs/heads/hysia_v2
2023-02-14T05:22:16.792928
2021-01-10T02:31:43
2021-01-10T02:31:43
212,741,650
63
20
Apache-2.0
2021-01-10T02:32:00
2019-10-04T05:22:08
Python
UTF-8
Python
false
false
2,754
py
import ssl
from pathlib import Path
from typing import Dict

import numpy as np
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.structures.bounding_box import BoxList

from common.engine import BaseEngine
from .mask_rcnn_predictor import COCODemo

# cancel ssl certificate verify
# (lets pretrained weights download over HTTPS without a valid cert chain)
ssl._create_default_https_context = ssl._create_unverified_context


class Engine(BaseEngine):
    """Mask R-CNN detection engine backed by maskrcnn-benchmark's COCODemo."""

    # Directory holding the maskrcnn-benchmark YAML config files.
    CFG_ROOT = Path(__file__).parent.absolute() / 'third/maskrcnn-benchmark/configs'

    def __init__(self, config):
        super().__init__(config)
        # `self.config` names the model/config to load (populated by BaseEngine).
        self._load_model(self.config)

    def _load_model(self, model_name: str):
        """Load the YAML config for `model_name` and build the COCODemo predictor."""
        self._model_name = model_name
        self._config = self._load_cfg()
        self._model = COCODemo(
            cfg,
            min_image_size=800,
            confidence_threshold=0.7,
        )

    def reset_model_version(self, model_name: str):
        """Swap to a different model/config at runtime (rebuilds the predictor)."""
        self._load_model(model_name)

    def _load_cfg(self):
        """Merge `<model_name>.yaml` under CFG_ROOT into the global cfg.

        NOTE: mutates the module-global `cfg` and forces GPU inference
        ("MODEL.DEVICE" = "cuda").
        """
        model_path = Path(self._model_name).with_suffix('.yaml')
        full_path = self.CFG_ROOT / model_path
        print('loading configuration from {}'.format(full_path))
        cfg.merge_from_file(full_path)
        return cfg.merge_from_list(["MODEL.DEVICE", "cuda"])

    @staticmethod
    def decode_bbox(predictions: BoxList):
        """Unpack a BoxList into plain Python lists.

        Arguments:
            predictions (BoxList): the result of the computation by the model.
                It should contain the fields `labels` and `scores`
                (and optionally `mask`).

        Returns:
            label_ids, boxes, scores, mask: lists of numeric label ids,
            integer [x1, y1, x2, y2] boxes and confidence scores, plus the
            mask list, or None when the predictions carry no `mask` field.
        """
        # get label
        label_ids = predictions.get_field('labels').tolist()
        boxes = predictions.bbox
        # Bounding boxes come back as float tensors; truncate to pixel ints.
        boxes = boxes.to(torch.int64).tolist()
        scores = predictions.get_field('scores').tolist()
        if predictions.has_field('mask'):
            mask = predictions.get_field('mask').tolist()
        else:
            mask = None
        return label_ids, boxes, scores, mask

    def single_predict(self, np_array: np.ndarray, **kwargs) -> Dict[str, list]:
        """Run detection on one image.

        Args:
            np_array: image array; the unpacking below assumes an
                H x W x C layout (channel order/colorspace not verifiable
                here -- presumably what COCODemo expects; confirm at caller).

        Returns:
            dict with 'labels', 'label_ids', 'boxes', 'scores', 'mask'
            plus the original image 'width'/'height'.
        """
        height, width, _ = np_array.shape
        predictions = self._model.compute_prediction(np_array)
        top_predictions = self._model.select_top_predictions(predictions)
        label_ids, boxes, scores, mask = self.decode_bbox(top_predictions)
        # Map numeric ids to human-readable COCO category names.
        labels = [self._model.CATEGORIES[i] for i in label_ids]
        return {
            'labels': labels,
            'label_ids': label_ids,
            'boxes': boxes,
            'scores': scores,
            'mask': mask,
            'width': width,
            'height': height
        }

    def batch_predict(self, *args, **kwargs):
        # Batch inference is not implemented yet; placeholder only.
        print('Hello world from batch predict.')
[ "YLI056@e.ntu.edu.sg" ]
YLI056@e.ntu.edu.sg
cf4e7fec26789b171a8ebaa8eabc49d62d29ff3c
ecbc02f304557c6069637f2871d4d4c9c3e04e98
/tests/core/test_source_map.py
56189b92496db80f7f6c3b9c500dbff0e3840aca
[ "MIT" ]
permissive
charles-cooper/vyper-debug
6b6e12ccc1ca9217caff7f21bc176b69d1bc8fc5
2678de107615c705a3e55edf811b23259990d1c4
refs/heads/master
2020-05-21T01:05:39.122973
2019-05-09T17:59:46
2019-05-09T17:59:46
185,847,661
1
0
MIT
2019-05-09T17:57:59
2019-05-09T17:57:59
null
UTF-8
Python
false
false
881
py
from vdb.source_map import produce_source_map


def test_source_map_output():
    """Check produce_source_map's globals/locals layout for a small Vyper contract."""
    # Fixture contract; the from_lineno/to_lineno assertions below refer to
    # line numbers inside this string (line 1 is the blank line after the
    # opening quotes).
    code = """
a_map: map(bytes32, bytes32)

@public
def func1(a: int128) -> int128:
    b: int128 = 2
    c: int128 = 3
    g: bytes[10]
    return a + b + c + 1

@public
def func2(a: int128):
    x: uint256
"""
    sm = produce_source_map(code)
    # globals
    # Storage slot map entries report declared type; size/position are in
    # storage-slot units here (map itself occupies no sequential slots).
    assert sm['globals']['a_map'] == {
        'type': 'map(bytes32, bytes32)',
        'size': 0,
        'position': 0
    }
    # locals
    # Memory positions are byte offsets; int128 locals are 32 bytes each and
    # the bytes[10] buffer rounds up to 96 bytes.
    assert sm['locals']['func1'] == {
        'from_lineno': 4,
        'to_lineno': 11,
        'variables': {
            'a': {'type': 'int128', 'size': 32, 'position': 320},
            'b': {'type': 'int128', 'size': 32, 'position': 352},
            'c': {'type': 'int128', 'size': 32, 'position': 384},
            'g': {'type': 'bytes[10]', 'size': 96, 'position': 416}
        },
    }
[ "jacques@dilectum.co.za" ]
jacques@dilectum.co.za
ea8866cc497b0eb9b862a1e59a2461097b8b4615
2469fc10932f11bb273fc6194b0c67779b6337f3
/1. 상수형 자료형/jump_to_Python_숫자형_5월_18일.py
e74a3289fb8d5209ea04451bb4c95eb2ccd8e4b7
[]
no_license
iopasd753951/Learn_Python
925d20bbcaa871ebdc427a4e0d31ee8e81c51f72
5488d0a8e4ebd8140a488f93d31bf7a13459daaf
refs/heads/master
2020-03-25T01:01:48.908415
2018-11-05T14:38:43
2018-11-05T14:38:43
143,218,566
1
0
null
null
null
null
UTF-8
Python
false
false
340
py
# coding: utf-8
# Exported Jupyter notebook cells ("In[...]" markers kept as-is).

# In[5]:

# Average of three integer scores (floor division).
first = int(input())
second = int(input())
third = int(input())
average = (first + second + third) // 3
print("점수 평균 :", average)


# In[6]:

# Floor-division example: 17 // 3.
quotient = 17 // 3
print(quotient)


# In[7]:

# Remainder example: 17 % 3.
remainder = 17 % 3
print(remainder)


# In[ ]:

# Parity check; the final else branch is unreachable for integers.
number = int(input())
if number % 2 == 0:
    print("짝수")
elif number % 2 == 1:
    print("홀수")
else:
    print("몰라")
[ "iopasd753951@gmail.com" ]
iopasd753951@gmail.com
dbe515b084f3cf998bb1cdf2e0a248687292e570
3428950daafacec9539a83809cf9752000508f63
/코딩테스트책/7-6.py
75ee8b86c6f39502de4fa3772176543822e986e7
[]
no_license
HyunAm0225/Python_Algorithm
759b91743abf2605dfd996ecf7791267b0b5979a
99fb79001d4ee584a9c2d70f45644e9101317764
refs/heads/master
2023-05-24T05:29:12.838390
2021-06-15T16:36:33
2021-06-15T16:36:33
274,587,523
0
0
null
null
null
null
UTF-8
Python
false
false
783
py
# Binary search: find the highest cutter setting that still yields at
# least m units of rice cake ("cutting rice cakes" problem).
n, m = map(int, input().split())
heights = list(map(int, input().split()))

# Search range for the cutter height.
low, high = 0, max(heights)

best = 0
while low <= high:
    mid = (low + high) // 2
    # Amount of rice cake obtained with the cutter at height `mid`.
    gained = sum(h - mid for h in heights if h > mid)
    if gained < m:
        # Not enough cake -- must cut lower (search the left half).
        high = mid - 1
    else:
        # Enough cake -- record this height and try cutting even higher;
        # the answer is the largest feasible height.
        best = mid
        low = mid + 1

print(best)
[ "tlfgjawlq@naver.com" ]
tlfgjawlq@naver.com
24ff3174bfa172b59160d3c30a8b202d43863cd5
1b87d5f7cba7e068f7b2ea902bba494599d20a78
/experimental/modeswitch/x11vmode.py
65e6243a8af467c60e50248a8f876db8546c600e
[ "BSD-3-Clause" ]
permissive
jpaalasm/pyglet
906d03fe53160885665beaed20314b5909903cc9
bf1d1f209ca3e702fd4b6611377257f0e2767282
refs/heads/master
2021-01-25T03:27:08.941964
2014-01-25T17:50:57
2014-01-25T17:50:57
16,236,090
2
2
null
null
null
null
UTF-8
Python
false
false
7,453
py
#!/usr/bin/env python
'''Experimental XF86VidMode switching for pyglet (Python 2).

Switches the X11 video mode and forks a watchdog child that restores the
original mode(s) when the parent dies -- even on a hard crash.
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id$'

import ctypes
import os
import fcntl
import mutex
import time
import select
import struct
import signal
import sys
import threading

from pyglet.window.xlib import xlib
import lib_xf86vmode as xf86vmode

class ModeList(object):
    # Lazily (re)loaded mode cache; True means `self.modes` is stale.
    invalid = True

    def __init__(self, x_display, x_screen):
        self.x_display = x_display
        self.x_screen = x_screen
        self.display_name = xlib.XDisplayString(self.x_display)

    @classmethod
    def from_screen(cls, screen):
        # Alternate constructor from a pyglet Screen object.
        display = screen.display
        return cls(display._display, screen._x_screen_id)

    def _validate(self):
        # Refresh `self.modes` from the server if the cache is stale.
        if not self.invalid:
            return
        count = ctypes.c_int()
        modes = ctypes.POINTER(
            ctypes.POINTER(xf86vmode.XF86VidModeModeInfo))()
        xf86vmode.XF86VidModeGetAllModeLines(self.x_display, self.x_screen,
                                             count, modes)
        # Copy modes out of list and free list
        self.modes = []
        for i in range(count.value):
            mode = xf86vmode.XF86VidModeModeInfo()
            ctypes.memmove(ctypes.byref(mode),
                           ctypes.byref(modes.contents[i]),
                           ctypes.sizeof(mode))
            self.modes.append(mode)
            if mode.privsize:
                xlib.XFree(mode.private)
        xlib.XFree(modes)
        self.invalid = False

    def _mode_packet(self, mode):
        # Wrap a raw mode struct in a serializable ModePacket.
        return ModePacket(self.display_name, self.x_screen,
                          mode.hdisplay, mode.vdisplay, mode.dotclock)

    def get_mode(self):
        '''Get current mode (ModePacket).

        The server reports the active mode first, so modes[0] is current.
        '''
        self._validate()
        return self._mode_packet(self.modes[0])

    def set_mode(self, width, height, dotclock=None):
        '''Set mode closest to requested width, height and dotclock (if
        specified).  Actual mode is returned.  Exception is raised if width or
        height are above maximum.
        '''
        self._validate()
        best_mode = None
        for mode in self.modes:
            # Skip modes smaller than the requested resolution.
            if width > mode.hdisplay or height > mode.vdisplay:
                continue
            if not best_mode:
                best_mode = mode
                continue
            if mode.hdisplay == best_mode.hdisplay:
                # NOTE(review): the same `vdisplay <` condition appears in
                # both branches below, making the elif unreachable -- the
                # inner test was presumably meant to compare dotclocks on
                # equal-height modes; confirm against upstream pyglet.
                if mode.vdisplay < best_mode.vdisplay:
                    if (dotclock is not None and
                        abs(dotclock - mode.dotclock) <
                            abs(dotclock - best_mode.dotclock)):
                        best_mode = mode
                elif mode.vdisplay < best_mode.vdisplay:
                    best_mode = mode
            elif mode.hdisplay < best_mode.hdisplay:
                best_mode = mode
        if best_mode is None:
            raise Exception('No mode is in range of requested resolution.')
        xf86vmode.XF86VidModeSwitchToMode(self.x_display, self.x_screen,
                                          best_mode)
        xlib.XFlush(self.x_display)
        # Mode list ordering changed on the server; force a reload next time.
        self.invalid = True
        return self._mode_packet(best_mode)

# Mode packets tell the child process how to restore a given display and
# screen.  Only one packet should be sent per display/screen (more would
# indicate redundancy or incorrect restoration).  Packet format is:
#   display (max 256 chars),
#   screen
#   width
#   height
#   dotclock
class ModePacket(object):
    # Fixed-size wire format so the child can read whole packets at a time.
    format = '256siHHI'
    size = struct.calcsize(format)

    def __init__(self, display_name, screen, width, height, dotclock):
        self.display_name = display_name
        self.screen = screen
        self.width = width
        self.height = height
        self.dotclock = dotclock

    def encode(self):
        # Serialize to the fixed-size binary record above.
        return struct.pack(self.format, self.display_name, self.screen,
                           self.width, self.height, self.dotclock)

    @classmethod
    def decode(cls, data):
        # Inverse of encode(); strips the NUL padding from display_name.
        display_name, screen, width, height, dotclock = \
            struct.unpack(cls.format, data)
        return cls(display_name.strip('\0'), screen, width, height, dotclock)

    def __repr__(self):
        return '%s(%r, %r, %r, %r, %r)' % (
            self.__class__.__name__, self.display_name, self.screen,
            self.width, self.height, self.dotclock)

    def set(self):
        # Open a fresh connection and restore this mode (used by the child,
        # whose inherited connection may be dead by then).
        display = xlib.XOpenDisplay(self.display_name)
        mode_list = ModeList(display, self.screen)
        mode_list.set_mode(self.width, self.height, self.dotclock)
        xlib.XCloseDisplay(display)

# Watchdog-child bookkeeping (module-level state; child forked at most once).
_restore_mode_child_installed = False
_restorable_screens = set()
_mode_write_pipe = None

def _install_restore_mode_child():
    global _mode_write_pipe
    global _restore_mode_child_installed

    if _restore_mode_child_installed:
        return

    # Parent communicates to child by sending "mode packets" through a pipe:
    mode_read_pipe, _mode_write_pipe = os.pipe()

    if os.fork() == 0:
        # Child process (watches for parent to die then restores video
        # mode(s)).
        os.close(_mode_write_pipe)

        # Set up SIGHUP to be the signal for when the parent dies.
        PR_SET_PDEATHSIG = 1
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
        libc.prctl.argtypes = (ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong,
                               ctypes.c_ulong, ctypes.c_ulong)
        libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP, 0, 0, 0)

        # SIGHUP indicates the parent has died.  The child mutex is unlocked,
        # it stops reading from the mode packet pipe and restores video modes
        # on all displays/screens it knows about.
        def _sighup(signum, frame):
            parent_wait_mutex.unlock()
        parent_wait_mutex = mutex.mutex()
        parent_wait_mutex.lock(lambda arg: arg, None)
        signal.signal(signal.SIGHUP, _sighup)

        # Wait for parent to die and read packets from parent pipe
        packets = []
        buffer = ''
        while parent_wait_mutex.test():
            data = os.read(mode_read_pipe, ModePacket.size)
            buffer += data

            # Decode packets
            while len(buffer) >= ModePacket.size:
                packet = ModePacket.decode(buffer[:ModePacket.size])
                packets.append(packet)
                buffer = buffer[ModePacket.size:]

        # Parent is gone: restore every recorded mode, then exit.
        for packet in packets:
            packet.set()
        sys.exit(0)
    else:
        # Parent process.  Clean up pipe then continue running program as
        # normal.  Send mode packets through pipe as additional
        # displays/screens are mode switched.
        os.close(mode_read_pipe)
        _restore_mode_child_installed = True

def _set_restore_mode(mode):
    _install_restore_mode_child()

    # This is not the real restore mode if one has already been set.
    if (mode.display_name, mode.screen) in _restorable_screens:
        return

    os.write(_mode_write_pipe, mode.encode())
    _restorable_screens.add((mode.display_name, mode.screen))

def _set_mode(screen, width, height):
    # Record the current mode for restoration, then switch.
    display_name = screen.display
    mode_list = ModeList.from_screen(screen)
    current_mode = mode_list.get_mode()
    _set_restore_mode(current_mode)
    new_mode = mode_list.set_mode(width, height)
    return new_mode.width, new_mode.height

import pyglet
window = pyglet.window.Window()
_set_mode(window.screen, 800, 600)

pyglet.app.run()

# Trigger a segfault -- mode still gets restored thanks to child :-)
print ctypes.c_char_p.from_address(0)
[ "joonas.paalasmaa@gmail.com" ]
joonas.paalasmaa@gmail.com
157bf7e582510d1c353c498ec1b026dbd39bdb35
714cfd73f40383d6a8cde7144f56c8777fafe8e3
/src/misc/features/create_tsfresh.py
17491a3e84e3976649b1f9a54c33d16a0fea2547
[ "BSD-2-Clause" ]
permissive
Ynakatsuka/g2net-gravitational-wave-detection
bed60d39534b4aced1469964369b0fec17c7b7c7
482914a64e0140f27e0058202af1fdea06f7b258
refs/heads/main
2023-09-06T00:35:07.096235
2021-10-29T07:44:34
2021-10-29T07:44:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,834
py
import multiprocessing
import os
import warnings

import hydra
import pandas as pd
import tsfresh
from base import BaseG2NetFeatureEngineeringDataset, G2NetFeatureEngineering
from omegaconf import DictConfig


class TsFreshFeatures(BaseG2NetFeatureEngineeringDataset):
    """Per-sample feature extractor running tsfresh over the 3 detector channels."""

    def _engineer_features(self, signals):
        """Extract tsfresh features for one sample.

        Args:
            signals: array-like of shape (3, n_samples); transposed so each
                channel becomes a DataFrame column -- TODO confirm shape
                matches what the base dataset yields.

        Returns:
            dict mapping tsfresh feature name -> scalar value.
        """
        df = pd.DataFrame(
            signals.T, columns=["channel_0", "channel_1", "channel_2"]
        )
        # tsfresh groups rows by `column_id`; all rows belong to one sample.
        df["id"] = 0
        extracted_features = tsfresh.extract_features(
            df, column_id="id", n_jobs=0, disable_progressbar=True
        )
        # Flatten the single-row result DataFrame into {feature: value}.
        return {k: v.values[0] for k, v in extracted_features.items()}


@hydra.main(config_path="../../../config", config_name="default")
def main(config: DictConfig) -> None:
    """Compute tsfresh features for train/test and pickle them under features_dir."""
    # Stem of this script, used to name the output feature files.
    filename = __file__.split("/")[-1][:-3]
    input_dir = config.input_dir
    features_dir = config.features_dir
    os.makedirs(features_dir, exist_ok=True)

    train = pd.read_csv(config.competition.train_path)
    test = pd.read_csv(config.competition.test_path)
    # Sample files are sharded by the first three characters of their id.
    train["path"] = train["id"].apply(
        lambda x: f"{input_dir}/train/{x[0]}/{x[1]}/{x[2]}/{x}.npy"
    )
    test["path"] = test["id"].apply(
        lambda x: f"{input_dir}/test/{x[0]}/{x[1]}/{x[2]}/{x}.npy"
    )

    num_workers = multiprocessing.cpu_count()
    transformer = G2NetFeatureEngineering(
        TsFreshFeatures, batch_size=num_workers, num_workers=num_workers
    )
    X_train = transformer.fit_transform(train["path"])
    X_test = transformer.transform(test["path"])
    print(X_train.info())

    # BUG FIX: the outputs were literally named "(unknown)_*.pkl" and the
    # computed `filename` variable was never used; name them after the script.
    X_train.to_pickle(os.path.join(features_dir, f"{filename}_train.pkl"))
    X_test.to_pickle(os.path.join(features_dir, f"{filename}_test.pkl"))


if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    main()
[ "nk.tsssa@gmail.com" ]
nk.tsssa@gmail.com
c8f9ad715eef34b8164134cbcfba734dc4d275cf
161eee91b961e3387526772233c9c63239b4af8d
/travelproject/travelapp/views.py
36c603dfff29f9e9dfc316fad551faceb00f6a5f
[]
no_license
sreekripa/travell
5ead55c80068796e297c4bf126e89e7542bbfdfc
b8085d8aaca4bfb4c09c67adea14094c777724c8
refs/heads/master
2023-05-02T23:18:28.387407
2021-05-17T08:49:54
2021-05-17T08:49:54
368,113,199
0
0
null
null
null
null
UTF-8
Python
false
false
448
py
from django.http import HttpResponse
from django.shortcuts import render

from .models import place
from .models import blog


# Create your views here.
def fun(request):
    """Render the index page with every place and blog entry."""
    places = place.objects.all()
    blogs = blog.objects.all()
    return render(request, "index.html", {'results': places, 'res': blogs})


def add(request):
    """Add the two POSTed integers "num1" and "num2" and render the result page."""
    first = int(request.POST["num1"])
    second = int(request.POST["num2"])
    return render(request, "result.html", {"add": first + second})
[ "kripas1990@gmail.com" ]
kripas1990@gmail.com
6414416c57b15df463a316ab5eb47dfd60c206ad
ac1e944eb288c8b13a0bef0ee7de85ee6d30b4c0
/django/portfolio/portfolio/settings.py
76fb42fa653d4ce899544cb20f47fd381ab5663a
[]
no_license
Jayson7/random-projects
05dd175d00e9bd62cb39973c3439846f641675c8
cdbebb896a0ecea0de543f16ecf4661e519ec0bb
refs/heads/master
2023-06-19T01:54:08.339954
2021-07-19T01:40:09
2021-07-19T01:40:09
383,971,840
0
0
null
null
null
null
UTF-8
Python
false
false
3,783
py
"""Django settings for the portfolio project."""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key and the cloudinary api_secret below are
# committed in plain text -- move both to environment variables before
# any real deployment.
SECRET_KEY = 'django-insecure-rfpl8-6sksa!z!4vqh@b9ddcxnu8j-w*j7e=rq*-^3w-qte*kc'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

import cloudinary
import cloudinary.uploader
import cloudinary.api

# Media-storage backend configuration (Cloudinary CDN).
cloudinary.config(
    cloud_name="jaytech",
    api_key="279168237821868",
    api_secret="VQAHpUEHyJfxhces1SiW5F8srEU",
    secure=True
)

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    "apps",
    "crispy_forms",
    'cloudinary',
    'django_filters',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # WhiteNoise serves the collected static files in production.
    'whitenoise.middleware.WhiteNoiseMiddleware',
]

ROOT_URLCONF = 'portfolio.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['Templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'portfolio.wsgi.application'

CRISPY_TEMPLATE_PACK = 'bootstrap4'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# (the previous os.path-based BASE_DIR redefinition resolved to the same
# directory and was removed; os.path.join accepts Path objects.)

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# NOTE(review): MEDIA_DIRS is not a Django setting; kept for backward
# compatibility in case project code reads it.
MEDIA_DIRS = os.path.join(BASE_DIR, 'media')

# https://warehouse.python.org/project/whitenoise/
# (was assigned twice with the identical value; deduplicated.)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
[ "lexxiijoo70@gmail.com" ]
lexxiijoo70@gmail.com
e210432980b1cb9606281d024f148ab4962d8f97
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_211/266.py
73a2efd2c2aea6e7baf64b46d9dff97a79692c99
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,588
py
import sys
import time

### I/O wrapper ###
class FileParser:
    """Read numbers/strings from file (or stdin by default), one line by one.
    """
    def __init__(self, filepath=None, type=None):
        # NOTE(review): `type` shadows the builtin; it is the open() mode
        # string ("r"/"w") when a filepath is given.
        if filepath is None:
            self.fd = sys.stdin
        else:
            self.fd = open(filepath, type)

    def read_string(self):
        """Return the next line with trailing whitespace stripped."""
        return self.fd.readline().rstrip()

    def read_words(self):
        """Return the next line split into whitespace-separated tokens."""
        return [x for x in self.read_string().split()]

    def read_int(self):
        """Return the next line parsed as a single int."""
        return int(self.fd.readline())

    def read_integers(self):
        """Return the next line parsed as a list of ints."""
        return [int(x) for x in self.fd.readline().rstrip().split()]

    def read_float(self):
        """Return the next line parsed as a single float."""
        return float(self.fd.readline())

    def read_floats(self):
        """Return the next line parsed as a list of floats."""
        return [float(x) for x in self.fd.readline().rstrip().split()]

    def write(self, context):
        """Write one line: to the file if open, otherwise echo to stdout."""
        if self.fd is not sys.stdin:
            self.fd.write(context+"\n")
        else:
            print(context)
        return

    def close(self):
        # stdin is never closed; file handles are released and cleared.
        if self.fd is not sys.stdin:
            self.fd.close()
            self.fd = None

def MultiThread(fun, input):
    """Map `fun` over `input` tuples on a thread pool, dropping None results.

    NOTE(review): shadows builtin `input`; unused in the current run path
    (the sequential loop below is active instead).
    """
    from multiprocessing.dummy import Pool as ThreadPool
    pool = ThreadPool()
    results = pool.starmap(fun, input)
    pool.close()
    pool.join()
    return list(filter(None.__ne__, results))

### specify the problem meta information ###
problemID = "C"  # A, B, C, D...
problemSize = "local"  # small, large, local
# filename = "%s-%s-practice" % (problemID, problemSize)
filename = "C-small-1-attempt1"

### the algorithm that solve the cases ###
globalCaseID = 0
def solve(case):
    """Solve one test case and return the maximized product of P.

    Greedy water-filling: first raise exact-zero entries, then repeatedly
    raise the current minimum entries up to the next distinct value (or 1.0)
    until the budget U is spent.  Mutates the case's P list in place and
    appends start/end timestamps to the module-level `timing` list.
    """
    # record the start timing
    timing.append(time.time())

    N, K, U, P = case
    # Count entries that are (numerically) zero.
    count = 0
    for foo in P:
        if abs(foo-0) < 1e-7:
            count += 1
    if count:
        # Spread budget evenly over the zero entries, capped at 1 each.
        inc = min(U / count, 1)
        for i in range(N):
            if abs(P[i]-0) < 1e-7:
                P[i] += inc
                U -= inc
    while U > 0 and sum(P) < N:
        # Raise the i lowest (equal) entries to the next distinct value.
        P = sorted(P)
        i = 1
        while i < N and P[i] == P[0]:
            i += 1
        if i < N:
            Next = P[i]
        else:
            Next = 1.0
        inc = min(U / i, Next - P[0])
        for j in range(i):
            P[j] += inc
            U -= inc
    # The answer is the product of all probabilities.
    ans = 1.0
    for foo in P:
        ans *= foo

    timing.append(time.time())
    global globalCaseID
    globalCaseID += 1
    print("Case %d" % globalCaseID, ans,
          "\t\t Elapsed: %.2f seconds" % (timing[-1] - timing[-2]))
    return ans

### solve the test cases ###
# for the purpose of counting the total elapsed time
timing = [time.time()]

# open the input / output files
f_in = FileParser(filename+".in", "r")
f_out = FileParser(filename+".out", "w")

# parse the input, and store them into cases
cases = []
T = f_in.read_int()
for _ in range(T):
    # read the input data of each case
    # f_in.read_string(), f_in.read_words()
    # f_in.read_int(), f_in.read_integers()
    # f_in.read_float(), f_in.read_floats()
    N, K = f_in.read_integers()
    U = f_in.read_float()
    P = f_in.read_floats()
    cases.append((N, K, U, P))

# solve each test case
#anses = MultiThread(solve, zip(cases))
for caseID in range(1, T+1):
    # solve the case
    ans = solve(cases[caseID-1])
    #ans = anses[caseID-1]
    # print the answer to output file
    f_out.write("Case #%d: %.9f" % (caseID, ans))

# close the input / output files
f_in.close()
f_out.close()

# output the total elapsed time
timing.append(time.time())
total_time = timing[-1] - timing[0]
print("Total elapsed time: %.2f seconds / %.2f minutes" % (total_time, total_time/60))
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
07fc07bcebb263a435c469b86d08d4dc46022037
60cb975f3e0251c73c457271bce8a7b2036e422b
/studysrc/day01/testIf.py
cd4b701787955e49548fb8081304b49a80d6e252
[]
no_license
49257620/reboot
0a2341f23bc1a6f3ae47b59f772919228c623544
86b348228d1a25d78c45b0e9022d7c773544373b
refs/heads/master
2018-11-17T19:19:58.969710
2018-09-25T03:15:57
2018-09-25T03:15:57
125,727,532
0
0
null
null
null
null
UTF-8
Python
false
false
409
py
# encoding utf-8
# Shopping simulation: buns are always bought; the watermelon only if seen.
answer = input('看到西瓜了吗?(看到输入\'Y\'或者\'y\'):')

money = 100.0
bun_price = 11.5
melon_price = 23.8

print('>>>买了一斤包子,花费' + str(bun_price) + '元')
money -= bun_price

# Buy the watermelon only when the user answered yes.
if answer in ('Y', 'y'):
    print('>>>买了一个西瓜,花费' + str(melon_price) + '元')
    money -= melon_price

print('剩余:' + str(money) + '元')
[ "49257620@qq.com" ]
49257620@qq.com
7f5fad0d8b83d94bd6cdc61d8149280442be7b1a
71460b3fa81c386b99a260ccf59c109bcde3b953
/realtime_data_processor.py
2826b5f671b03c4c43341dc13008b15195657c8a
[ "MIT" ]
permissive
ec500-software-engineering/exercise-1-modularity-ZhibinHuang
831f07f31bfaa2a5c3b043e9fb66942208ca6020
ab36bf593fa61fe183c57af15a011d4353581d6d
refs/heads/master
2020-04-21T21:49:56.137069
2019-02-14T23:40:19
2019-02-14T23:40:19
169,891,502
1
0
null
null
null
null
UTF-8
Python
false
false
2,039
py
import time
import random
import threading

from common_types import SensorDataType
from common_types import Message, MessageUrgency


class RealTimeDataProcessor(threading.Thread):
    """Consumes sensor readings from a queue and alerts on abnormal vitals."""

    def __init__(self, in_queue, notification_man):
        super().__init__()
        self._in_queue = in_queue
        self._notification_man = notification_man

    # Can probably put these range checks in a separate class
    @staticmethod
    def blood_pressure_is_normal(pressure_data):
        """True when systolic is within [90, 120] and diastolic within [60, 80]."""
        systolic_ok = 90 <= pressure_data.get_systolic() <= 120
        diastolic_ok = 60 <= pressure_data.get_diastolic() <= 80
        return systolic_ok and diastolic_ok

    @staticmethod
    def blood_pulse_is_normal(pulse_data):
        """True when the pulse is within the normal resting range [60, 100]."""
        return 60 <= pulse_data.get_pulse() <= 100

    def _send_alert(self, text):
        # All vitals alerts go out at high urgency.
        self._notification_man.send_message(
            Message(text, MessageUrgency.HIGH_URGENCY)
        )

    def run(self):
        """Block on the read queue forever, alerting whenever a reading is abnormal."""
        while True:
            reading = self._in_queue.get(block=True)
            kind = reading.get_type()
            if kind == SensorDataType.BLOOD_PRESSURE \
                    and not RealTimeDataProcessor.blood_pressure_is_normal(reading):
                self._send_alert('!!!! PATIENT ALERT BLOOD PRESSURE ABNORMAL !!!!')
            elif kind == SensorDataType.BLOOD_PULSE \
                    and not RealTimeDataProcessor.blood_pulse_is_normal(reading):
                self._send_alert('!!!! PATIENT ALERT PULSE IS ABNORMAL !!!!')
            # yield quantum/time slice for other ready threads
            time.sleep(
                random.randint(1, 3)
            )
[ "noreply@github.com" ]
ec500-software-engineering.noreply@github.com
568a052bbe4f8f62f7f7f617b5b3f6e9d966ea8a
a80e9eb7ade3d43ce042071d796c00dd10b93225
/ch_6/stars_data_dict2.py
8633d4020bddd3caec1f4b9297bb5e9acdc1657d
[]
no_license
ksjpswaroop/python_primer
69addfdb07471eea13dccfad1f16c212626dee0a
99c21d80953be3c9dc95f3a316c04b0c5613e830
refs/heads/master
2020-07-14T17:37:45.923796
2014-06-06T22:30:48
2014-06-06T22:30:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,415
py
# Exercise 6.13 infile = open('stars.dat', 'r') data = {} for line in infile.readlines()[1:]: words = line.split() name = ' '.join(words[:-3]) if words[-3] == '-': distance = '-' apparent_brightness = '-' else: distance = float(words[-3]) apparent_brightness = float(words[-2]) luminosity = float(words[-1]) data[name] = {'distance': distance, 'apparent brightness': apparent_brightness, 'luminosity': luminosity} print '='*68 print '%-20s %12s %18s %15s' % ('Star', 'Distance', 'App. brightness', 'Luminosity') print '-'*68 for star in data: if star == 'Sun': print '%-20s %12s %18s %15.5f' % (star, data[star]['distance'], data[star]['apparent brightness'], data[star]['luminosity']) else: print '%-20s %12f %18f %15.5f' % (star, data[star]['distance'], data[star]['apparent brightness'], data[star]['luminosity']) print '='*68 """ Sample run: python stars_data_dict2.py ==================================================================== Star Distance App. brightness Luminosity -------------------------------------------------------------------- Wolf 359 7.700000 0.000001 0.00002 Sun - - 1.00000 Alpha Centauri C 4.200000 0.000010 0.00006 Alpha Centauri B 4.300000 0.077000 0.45000 Alpha Centauri A 4.300000 0.260000 1.56000 Luyten 726-8 A 8.400000 0.000003 0.00006 Sirius B 8.600000 0.001000 0.00300 Sirius A 8.600000 1.000000 23.60000 Luyten 726-8 B 8.400000 0.000002 0.00004 BD +36 degrees 2147 8.200000 0.000300 0.00600 Barnard's Star 6.000000 0.000040 0.00050 Ross 154 9.400000 0.000020 0.00050 ==================================================================== =============================== """
[ "noahwaterfieldprice@gmail.com" ]
noahwaterfieldprice@gmail.com
8a2be07e1e5c38ef9e5fb4cc1ec1310b15899623
202be9ce15e7e41bad55e6bbe4d0c941ecbb6781
/1037 在霍格沃茨找零钱.py
e054872072ec15b2a83bb4a99f9fb821c4ae05fb
[]
no_license
junyechen/Basic-level
ae55ab4e13fd38595772786af25fcc91c055f28c
a6e15bc3829dfe05cefc248454f0433f8070cdfb
refs/heads/master
2020-04-29T08:01:21.936408
2019-07-06T04:16:14
2019-07-06T04:16:14
175,972,034
1
0
null
null
null
null
UTF-8
Python
false
false
1,445
py
'''
Harry Potter change-making (PAT Basic 1037).

Wizarding currency: 1 Galleon = 17 Sickles, 1 Sickle = 29 Knuts.
Read the price P and the amount paid A (each "Galleon.Sickle.Knut",
space-separated on one line) and print the change A - P in the same
format; insufficient payment is printed with a leading '-'.
'''


def _to_knuts(parts):
    # Convert ['G', 'S', 'K'] digit strings to a single amount in Knuts.
    galleons, sickles, knuts = (int(p) for p in parts)
    return (galleons * 17 + sickles) * 29 + knuts


price_parts, paid_parts = (token.split('.') for token in input().split())
change = _to_knuts(paid_parts) - _to_knuts(price_parts)

# Insufficient payment: emit the sign, then format the magnitude.
if change < 0:
    print('-', end='')
    change = -change

galleons, remainder = divmod(change, 17 * 29)
sickles, knuts = divmod(remainder, 29)
print(galleons, sickles, knuts, sep='.')
[ "chenjunyeword@outlook.com" ]
chenjunyeword@outlook.com
f208947393151e0fea908eb59eb830d80620d0fe
b4916436d437d98f79ae2af4e56fa1acd5f84e7f
/pycozmo/robot.py
33d37092a81e8891be669da54fffd7932716207c
[ "MIT", "Apache-2.0" ]
permissive
VictorTagayun/pycozmo
b875f3e0008efeae39f3cea80418aee66af8511e
dd971aad2d32419deae00b1294922b416ba2e2b9
refs/heads/master
2020-07-30T07:22:21.715324
2019-09-13T09:32:51
2019-09-13T09:32:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,602
py
import math from . import util from . import protocol_encoder MIN_HEAD_ANGLE = util.Angle(degrees=-25) MAX_HEAD_ANGLE = util.Angle(degrees=44.5) MIN_LIFT_HEIGHT = util.Distance(mm=32.0) MAX_LIFT_HEIGHT = util.Distance(mm=92.0) LIFT_ARM_LENGTH = util.Distance(mm=66.0) LIFT_PIVOT_HEIGHT = util.Distance(mm=45.0) MAX_WHEEL_SPEED = util.Speed(mmps=200.0) class RobotStatusFlag(object): IS_MOVING = 0x1 IS_CARRYING_BLOCK = 0x2 IS_PICKING_OR_PLACING = 0x4 IS_PICKED_UP = 0x8 IS_BODY_ACC_MODE = 0x10 IS_FALLING = 0x20 IS_ANIMATING = 0x40 IS_PATHING = 0x80 LIFT_IN_POS = 0x100 HEAD_IN_POS = 0x200 IS_ANIM_BUFFER_FULL = 0x400 IS_ANIMATING_IDLE = 0x800 IS_ON_CHARGER = 0x1000 IS_CHARGING = 0x2000 CLIFF_DETECTED = 0x4000 ARE_WHEELS_MOVING = 0x8000 IS_CHARGER_OOS = 0x10000 RobotStatusFlagNames = { RobotStatusFlag.IS_MOVING: "IS_MOVING", RobotStatusFlag.IS_CARRYING_BLOCK: "IS_CARRYING_BLOCK", RobotStatusFlag.IS_PICKING_OR_PLACING: "IS_PICKING_OR_PLACING", RobotStatusFlag.IS_PICKED_UP: "IS_PICKED_UP", RobotStatusFlag.IS_BODY_ACC_MODE: "IS_BODY_ACC_MODE", RobotStatusFlag.IS_FALLING: "IS_FALLING", RobotStatusFlag.IS_ANIMATING: "IS_ANIMATING", RobotStatusFlag.IS_PATHING: "IS_PATHING", RobotStatusFlag.LIFT_IN_POS: "LIFT_IN_POS", RobotStatusFlag.HEAD_IN_POS: "HEAD_IN_POS", RobotStatusFlag.IS_ANIM_BUFFER_FULL: "IS_ANIM_BUFFER_FULL", RobotStatusFlag.IS_ANIMATING_IDLE: "IS_ANIMATING_IDLE", RobotStatusFlag.IS_ON_CHARGER: "IS_ON_CHARGER", RobotStatusFlag.IS_CHARGING: "IS_CHARGING", RobotStatusFlag.CLIFF_DETECTED: "CLIFF_DETECTED", RobotStatusFlag.ARE_WHEELS_MOVING: "ARE_WHEELS_MOVING", RobotStatusFlag.IS_CHARGER_OOS: "IS_CHARGER_OOS", } BODY_COLOR_NAMES = { protocol_encoder.BodyColor.WHITE_v10: "Original", protocol_encoder.BodyColor.RESERVED: "Reserved", protocol_encoder.BodyColor.WHITE_v15: "White", protocol_encoder.BodyColor.CE_LM_v15: "CE_LM", protocol_encoder.BodyColor.LE_BL_v16: "LE_BL", } class LiftPosition(object): """ Represents the position of Cozmo's lift. 
The class allows the position to be referred to as either absolute height above the ground, as a ratio from 0.0 to 1.0, or as the angle of the lift arm relative to the ground. Args: height (:class:`cozmo.util.Distance`): The height of the lift above the ground. ratio (float): The ratio from 0.0 to 1.0 that the lift is raised from the ground. angle (:class:`cozmo.util.Angle`): The angle of the lift arm relative to the ground. """ __slots__ = ('_height', ) def __init__(self, height=None, ratio=None, angle=None): def _count_arg(arg): # return 1 if argument is set (not None), 0 otherwise return 0 if (arg is None) else 1 num_provided_args = _count_arg(height) + _count_arg(ratio) + _count_arg(angle) if num_provided_args != 1: raise ValueError("Expected one, and only one, of the distance, ratio or angle keyword arguments") if height is not None: if not isinstance(height, util.Distance): raise TypeError("Unsupported type for distance - expected util.Distance") self._height = height elif ratio is not None: height_mm = MIN_LIFT_HEIGHT.mm + (ratio * (MAX_LIFT_HEIGHT.mm - MIN_LIFT_HEIGHT.mm)) self._height = util.Distance(mm=height_mm) elif angle is not None: if not isinstance(angle, util.Angle): raise TypeError("Unsupported type for angle - expected util.Angle") height_mm = (math.sin(angle.radians) * LIFT_ARM_LENGTH.mm) + LIFT_PIVOT_HEIGHT.mm self._height = util.Distance(mm=height_mm) def __repr__(self): return "<%s height=%s ratio=%s angle=%s>" % (self.__class__.__name__, self._height, self.ratio, self.angle) @property def height(self) -> util.Distance: """ Height above the ground. """ return self._height @property def ratio(self) -> float: """ The ratio from 0 to 1 that the lift is raised, 0 at the bottom, 1 at the top. """ ratio = ((self._height.mm - MIN_LIFT_HEIGHT.mm) / (MAX_LIFT_HEIGHT.mm - MIN_LIFT_HEIGHT.mm)) return ratio @property def angle(self) -> util.Angle: """ The angle of the lift arm relative to the ground. 
""" sin_angle = (self._height.mm - LIFT_PIVOT_HEIGHT.mm) / LIFT_ARM_LENGTH.mm angle = math.asin(sin_angle) return util.Angle(radians=angle)
[ "zayfod@gmail.com" ]
zayfod@gmail.com
2f3bad0b23444dbe7cdadc422637b213f444f4f0
7f21abecb951371885ca007bd24eebbb61e8d0a0
/lesson_012/python_snippets/04_queues.py
81fbb8e8c8b542f08790a434e6a058aaf9c808bf
[]
no_license
zaboevai/python_base
a076b0d8798f103347dddcf0be0d09fb02815609
c689568c926db5ff4f9cdb4f5c335fac7a434130
refs/heads/develop
2022-11-25T11:04:21.678107
2021-06-09T19:48:52
2021-06-09T19:48:52
191,818,125
23
18
null
2022-11-22T07:57:23
2019-06-13T19:01:48
Python
UTF-8
Python
false
false
4,284
py
# -*- coding: utf-8 -*- # Кроме блокировок и примитивов синхронизации существует еще один способ обмена данными между потоками. # Это очереди - Queue - https://docs.python.org/3.6/library/queue.html # В очередь можно положить элемент и взять его. Queue гарантирует что потоки не помешают друг другу # - операции очереди атомарные и блокирующие. import time from collections import defaultdict import queue import random import threading FISH = (None, 'плотва', 'окунь', 'лещ') # Посадим всех рыбаков в лодку, в которой есть садок для улова. class Fisher(threading.Thread): def __init__(self, name, worms, catcher, *args, **kwargs): super().__init__(*args, **kwargs) self.name = name self.worms = worms self.catcher = catcher def run(self): for worm in range(self.worms): print(f'{self.name}, {worm}: забросили ждем...', flush=True) # time.sleep(random.randint(1, 3) / 10) fish = random.choice(FISH) if fish is None: print(f'{self.name}, {worm}: сожрали червяка!', flush=True) else: print(f'{self.name}, {worm}: поймал {fish} и хочет положить его в садок', flush=True) if self.catcher.full(): print(f'{self.name}, {worm}: приемщик полон !!!', flush=True) # Этот метод у очереди - атомарный и блокирующий # Поток приостанавливается, пока нет места в очереди self.catcher.put(fish) print(f'{self.name}, {worm}: наконец-то отдал {fish} приемщику', flush=True) class Boat(threading.Thread): def __init__(self, worms_per_fisher=10, *args, **kwargs): super().__init__(*args, **kwargs) self.fishers = [] self.worms_per_fisher = worms_per_fisher self.catcher = queue.Queue(maxsize=2) self.fish_tank = defaultdict(int) def add_fisher(self, name): fisher = Fisher(name=name, worms=self.worms_per_fisher, catcher=self.catcher) self.fishers.append(fisher) def run(self): print('Лодка вышла в море...', flush=True) for fisher in self.fishers: fisher.start() while True: try: # Этот метод у очереди - атомарный и блокирующий, # Поток приостанавливается, пока нет элементов в очереди fish = 
self.catcher.get(timeout=1) print(f'Приемщик принял {fish} и положил в садок', flush=True) self.fish_tank[fish] += 1 except queue.Empty: print(f'Приемщику нет рыбы в течении 1 секунды', flush=True) if not any(fisher.is_alive() for fisher in self.fishers): break for fisher in self.fishers: fisher.join() print(f'Лодка возвращается домой с {self.fish_tank}', flush=True) boat = Boat(worms_per_fisher=10) humans = ['Васек', 'Колян', 'Петрович', 'Хмурый', 'Клава', ] for name in humans: boat.add_fisher(name=name) boat.start() boat.join() print(f'лодка привезла {boat.catch}') # Мы использовали очередь вида FIFO - first input, first output - первый вошел, первый вышел. # В модуле queue есть еще два вида очередей: # LifoQueue - last input, first output - последний вошел, первый вышел (еще такую очередь называют стеком). # PriorityQueue - первым возвращается наименьший элемент, то есть sorted(list(entries))[0]
[ "you@example.com" ]
you@example.com
0e4a1d59e13199cb3b7d2caf6557074911679da9
da9cbae7c2b9789951874f4b2dd9eba990753bbd
/run2018/crab_V0cumu_HIMB2018_Ks_SBPos_Mid_sysMCBias_v1.py
cdc96fb1d89a9fba410816b27db346a1d6940c11
[]
no_license
BetterWang/QWCumuDiff
d0be53a87dd345153b603a16617f572d5adc288c
cccb286bb9ee4b9cb3aa78cc839ae993c66624f1
refs/heads/master
2021-09-13T01:30:37.988189
2021-08-25T13:36:57
2021-08-25T13:36:57
81,896,082
0
0
null
null
null
null
UTF-8
Python
false
false
2,008
py
from CRABAPI.RawCommand import crabCommand from CRABClient.UserUtilities import config from CRABClient.ClientExceptions import ClientException from httplib import HTTPException config = config() config.General.requestName = 'HIMB0_V0Cumu_Ks_SBPos_Mid_sysMCBias_v2' config.General.workArea = 'CrabArea' config.General.transferOutputs = True config.General.transferLogs = False config.JobType.pluginName = 'Analysis' config.JobType.psetName = 'qwcumu_PbPb18_V0_MCBias_v2s.py' #config.JobType.maxJobRuntimeMin = 2500 config.JobType.inputFiles = ['MC_Full_BDT250_D4.KS.weights.xml'] config.JobType.pyCfgParams = ['part=KS', 'massRange=SBPos', 'rap=Mid'] config.Data.inputDataset = '/HIMinimumBias0/qwang-V0Skim_v3-5f932986cf38f9e8dbd6c3aea7f6c2b4/USER' config.Data.inputDBS = 'phys03' config.Data.splitting = 'Automatic' #config.Data.unitsPerJob = 8 config.Data.outLFNDirBase = '/store/group/phys_heavyions/qwang/PbPb2018' config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/HI/PromptReco/Cert_326381-327564_HI_PromptReco_Collisions18_JSON.txt' config.Data.publication = False #config.Data.outputDatasetTag = '' config.Data.useParent = True config.Data.ignoreLocality = True config.Site.whitelist = ['T2_US_Vanderbilt'] config.Site.storageSite = 'T2_CH_CERN' config.Data.allowNonValidInputDataset = True config.JobType.allowUndistributedCMSSW = True #try: # crabCommand('submit', config = config) #except HTTPException as hte: # print "Failed submitting task: %s" % (hte.headers) #except ClientException as cle: # print "Failed submitting task: %s" % (cle) # config.General.requestName = 'HIMB1_V0Cumu_Ks_SBPos_Mid_sysMCBias_v2' config.Data.inputDataset = '/HIMinimumBias1/qwang-V0Skim_v3-5f932986cf38f9e8dbd6c3aea7f6c2b4/USER' try: crabCommand('submit', config = config) except HTTPException as hte: print "Failed submitting task: %s" % (hte.headers) except ClientException as cle: print "Failed submitting task: %s" % (cle)
[ "BetterWang@gmail.com" ]
BetterWang@gmail.com
841dc8000e963e39aa4a57d8541acaba350bf449
2b8c88dfee5c5a784357515eafe8cd5f997c8774
/leetcode/54.spiral-matrix.py
a4bbfff15df9fe1594001407e0139df5d8c37fd6
[]
no_license
archenRen/learnpy
e060f3aa2f77c35fc1b12345720af6c8b528da57
934ef76b97297f746a722a48c76672c7bc744cd9
refs/heads/master
2022-04-28T20:25:59.114036
2020-05-03T02:16:03
2020-05-03T02:16:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,076
py
# # @lc app=leetcode id=54 lang=python3 # # [54] Spiral Matrix # # https://leetcode.com/problems/spiral-matrix/description/ # # algorithms # Medium (31.36%) # Likes: 1345 # Dislikes: 450 # Total Accepted: 264.2K # Total Submissions: 842.4K # Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]' # # Given a matrix of m x n elements (m rows, n columns), return all elements of # the matrix in spiral order. # # Example 1: # # # Input: # [ # ⁠[ 1, 2, 3 ], # ⁠[ 4, 5, 6 ], # ⁠[ 7, 8, 9 ] # ] # Output: [1,2,3,6,9,8,7,4,5] # # # Example 2: # # Input: # [ # ⁠ [1, 2, 3, 4], # ⁠ [5, 6, 7, 8], # ⁠ [9,10,11,12] # ] # Output: [1,2,3,4,8,12,11,10,9,5,6,7] # # # https://leetcode.com/problems/spiral-matrix/discuss/20571/1-liner-in-Python-%2B-Ruby # 有详细分析 # 递归解法 # 去一行后,顺时针旋转矩阵。参考leetcode-48. 但是顺时针旋转应该先转置再倒置。 class Solution: def spiralOrder(self, matrix: 'List[List[int]]') -> 'List[int]': return matrix and list(matrix.pop(0)) + self.spiralOrder(list(zip(*matrix))[::-1])
[ "wangdi03@ppdai.com" ]
wangdi03@ppdai.com
7bd4527bda1c4953bf8f336080c3691c2b70fad9
0f30dbffc77960edf69fa18c78c6d1a1658bb3dc
/tests/test_cu_linear_operator.py
3ad17786ed37a887b11b56d0139e1ac2267065ec
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
ameli/imate
859c5ed35540cc44058cd908ce44485487acd041
de867f131a4cda7d60a68bf0558e896fae89d776
refs/heads/main
2023-08-29T07:03:53.512434
2023-08-15T23:39:30
2023-08-15T23:39:30
308,965,310
5
1
null
null
null
null
UTF-8
Python
false
false
1,169
py
#! /usr/bin/env python # SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu> # SPDX-License-Identifier: BSD-3-Clause # SPDX-FileType: SOURCE # # This program is free software: you can redistribute it and/or modify it # under the terms of the license found in the LICENSE.txt file in the root # directory of this source tree. # ======= # Imports # ======= import sys # This package might not be compiled with the cuda support. try: from imate._cu_linear_operator.tests import test_cu_matrix, \ test_cu_affine_matrix_function subpackage_exists = True except ModuleNotFoundError: subpackage_exists = False # ======================= # test cu linear operator # ======================= def test_cu_linear_operator(): """ A wrapper for :mod:`imate._linear_operator.tests` test sub-module. """ # A test for linear operator if subpackage_exists: try: test_cu_matrix() test_cu_affine_matrix_function() except RuntimeError as e: print(e) # =========== # System Main # =========== if __name__ == "__main__": sys.exit(test_cu_linear_operator())
[ "sia.sunrise@gmail.com" ]
sia.sunrise@gmail.com
b5c178156f40eedcd15619cedc40e1807107e05c
82fce9aae9e855a73f4e92d750e6a8df2ef877a5
/Lab/venv/lib/python3.8/site-packages/OpenGL/GL/ARB/texture_buffer_range.py
7d62ab26b7cb24e42b8cd5f95489e5d880249a37
[]
no_license
BartoszRudnik/GK
1294f7708902e867dacd7da591b9f2e741bfe9e5
6dc09184a3af07143b9729e42a6f62f13da50128
refs/heads/main
2023-02-20T19:02:12.408974
2021-01-22T10:51:14
2021-01-22T10:51:14
307,847,589
0
0
null
null
null
null
UTF-8
Python
false
false
1,228
py
'''OpenGL extension ARB.texture_buffer_range This module customises the behaviour of the OpenGL.raw.GL.ARB.texture_buffer_range to provide a more Python-friendly API Overview (from the spec) ARB_texture_buffer_object (which was promoted to core in OpenGL 3.1) introduced the ability to attach the data store of a buffer object to a buffer texture and access it from shaders. The extension only allows the entire store of the buffer object to the texture. This extension expands on this and allows a sub-range of the buffer's data store to be attached to a texture. This can be used, for example, to allow multiple buffer textures to be backed by independent sub-ranges of the same buffer object, or for different sub-ranges of a single buffer object to be used for different purposes. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ARB/texture_buffer_range.txt ''' from OpenGL.raw.GL.ARB.texture_buffer_range import _EXTENSION_NAME def glInitTextureBufferRangeARB(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION
[ "rudnik49@gmail.com" ]
rudnik49@gmail.com
58f2641de9afc33cf7980ff3ad484fd18b82cdfe
3f566babc0230a9d1d2a8ab6a4f0fee9bf2f497b
/tools/mayaCore/cmds/pSets.py
e162540227c1b10025df837e4030443add8ac9ad
[]
no_license
snaress/studio_dev
3118e6d7b5ab7e9f7f318cf0c2c4145ad61d5f7f
a58608922abe1d47bf3d807c5db11e265aad85a2
refs/heads/master
2021-01-21T13:17:58.396068
2016-04-25T00:42:27
2016-04-25T00:42:27
51,249,314
0
0
null
null
null
null
UTF-8
Python
false
false
1,093
py
try: import maya.cmds as mc except: pass def getAllSets(suffixes=None): """ Get all sets ending with given suffixes :param suffixes: Set suffixes :type suffixes: list :return: Sets list :rtype: list """ setList = [] for s in mc.ls(type='objectSet') or []: for ext in suffixes: if suffixes is None: setList.append(s) else: if s.endswith(ext): setList.append(s) return setList def removeSets(sets=None, suffixes=None): """ Delete given sets or all sets given by 'getAllSets()' :param sets: Sets list to delete :type sets: list :param suffixes: Set suffixes :type suffixes: list """ #--- Get Sets ---# if sets is None: allSets = getAllSets(suffixes=suffixes) else: allSets = sets #--- Remove Sets ---# while allSets: for s in allSets: try: mc.delete(s) print 'delete', s except: pass allSets = getAllSets()
[ "jln.buisseret@gmail.com" ]
jln.buisseret@gmail.com
572594fbea40e8ffdd3e9a85fce23c0041774610
ca4e57a6861f1e24d1521bf5b775aee3b6db7725
/lex1.py
1d60b17dd7ce6380a7e868149f8d16909811c278
[]
no_license
mathi98/madhu
e296a477f3684a596c74a228c9ce867f1f60c3f8
cae2adb19ccf7c7f12212d694cd0d09614cd5d81
refs/heads/master
2020-05-23T01:06:54.830389
2019-06-28T14:13:07
2019-06-28T14:13:07
186,582,298
0
2
null
null
null
null
UTF-8
Python
false
false
183
py
k=int(input()) l=list(map(str,input().split())) a=sorted(l,key=len) for i in range(len(a)-1): if len(a[i])==len(a[i+1]) and a[i]>a[i+1]: a[i],a[i+1]=a[i+1],a[i] print(*a)
[ "noreply@github.com" ]
mathi98.noreply@github.com
90af24bb6ece2824041e3e25a9bab47b6f3f620d
ad14c9b6454c3e8e657e994914bdfe97c2188c22
/oostepbystep/person.py
ed964445701ac3ae80004664d035181f4263c8b3
[]
no_license
woodyyan/twa-python-bootcamp
1e6ecdac10a7fb3366ce7180eae93678afe1d9a8
3ef20a88e6954662e227ccf804b76ebc5fb6d74d
refs/heads/master
2020-09-12T13:23:02.585373
2019-12-25T12:24:33
2019-12-25T12:24:33
222,439,401
0
0
null
null
null
null
UTF-8
Python
false
false
198
py
class Person: def __init__(self, name, age): self.name = name self.age = age def introduce(self): return 'My name is %s. I am %s years old.' % (self.name, self.age)
[ "colorguitar@hotmail.com" ]
colorguitar@hotmail.com
2b5f99242912f7e6a226598779f4b87dc13a02f9
762c28b8cda476574d71453701d90caf56973556
/network/loss_lib.py
8b9b49cbad66a3cc2ee3da483ed518f89e61978b
[]
no_license
RuiLiFeng/code
cf480e6f4ad598512b8147374687c6a379a9dc43
6bc288bd7d9e3dfc7f6847aaaa12bcf21f4950de
refs/heads/master
2020-07-11T19:52:45.847382
2019-09-02T13:57:35
2019-09-02T13:57:35
204,631,332
2
1
null
null
null
null
UTF-8
Python
false
false
6,058
py
# coding=utf-8 # Copyright 2018 Google LLC & Hwalsuk Lee. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of popular GAN losses.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import utils import gin import tensorflow as tf def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits): """Checks the shapes and ranks of logits and prediction tensors. Args: d_real: prediction for real points, values in [0, 1], shape [batch_size, 1]. d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1]. d_real_logits: logits for real points, shape [batch_size, 1]. d_fake_logits: logits for fake points, shape [batch_size, 1]. Raises: ValueError: if the ranks or shapes are mismatched. """ def _check_pair(a, b): if a != b: raise ValueError("Shape mismatch: %s vs %s." 
% (a, b)) if len(a) != 2 or len(b) != 2: raise ValueError("Rank: expected 2, got %s and %s" % (len(a), len(b))) if (d_real is not None) and (d_fake is not None): _check_pair(d_real.shape.as_list(), d_fake.shape.as_list()) if (d_real_logits is not None) and (d_fake_logits is not None): _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list()) if (d_real is not None) and (d_real_logits is not None): _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list()) @gin.configurable(whitelist=[]) def non_saturating(d_real_logits, d_fake_logits, d_real=None, d_fake=None): """Returns the discriminator and generator loss for Non-saturating loss. Args: d_real_logits: logits for real points, shape [batch_size, 1]. d_fake_logits: logits for fake points, shape [batch_size, 1]. d_real: ignored. d_fake: ignored. Returns: A tuple consisting of the discriminator loss, discriminator's loss on the real samples and fake samples, and the generator's loss. """ with tf.name_scope("non_saturating_loss"): check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_real_logits, labels=tf.ones_like(d_real_logits), name="cross_entropy_d_real")) d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits), name="cross_entropy_d_fake")) d_loss = d_loss_real + d_loss_fake g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=d_fake_logits, labels=tf.ones_like(d_fake_logits), name="cross_entropy_g")) return d_loss, d_loss_real, d_loss_fake, g_loss @gin.configurable(whitelist=[]) def wasserstein(d_real_logits, d_fake_logits, d_real=None, d_fake=None): """Returns the discriminator and generator loss for Wasserstein loss. Args: d_real_logits: logits for real points, shape [batch_size, 1]. d_fake_logits: logits for fake points, shape [batch_size, 1]. d_real: ignored. d_fake: ignored. 
Returns: A tuple consisting of the discriminator loss, discriminator's loss on the real samples and fake samples, and the generator's loss. """ with tf.name_scope("wasserstein_loss"): check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) d_loss_real = -tf.reduce_mean(d_real_logits) d_loss_fake = tf.reduce_mean(d_fake_logits) d_loss = d_loss_real + d_loss_fake g_loss = -d_loss_fake return d_loss, d_loss_real, d_loss_fake, g_loss @gin.configurable(whitelist=[]) def least_squares(d_real, d_fake, d_real_logits=None, d_fake_logits=None): """Returns the discriminator and generator loss for the least-squares loss. Args: d_real: prediction for real points, values in [0, 1], shape [batch_size, 1]. d_fake: prediction for fake points, values in [0, 1], shape [batch_size, 1]. d_real_logits: ignored. d_fake_logits: ignored. Returns: A tuple consisting of the discriminator loss, discriminator's loss on the real samples and fake samples, and the generator's loss. """ with tf.name_scope("least_square_loss"): check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) d_loss_real = tf.reduce_mean(tf.square(d_real - 1.0)) d_loss_fake = tf.reduce_mean(tf.square(d_fake)) d_loss = 0.5 * (d_loss_real + d_loss_fake) g_loss = 0.5 * tf.reduce_mean(tf.square(d_fake - 1.0)) return d_loss, d_loss_real, d_loss_fake, g_loss @gin.configurable(whitelist=[]) def hinge(d_real_logits, d_fake_logits, d_real=None, d_fake=None): """Returns the discriminator and generator loss for the hinge loss. Args: d_real_logits: logits for real points, shape [batch_size, 1]. d_fake_logits: logits for fake points, shape [batch_size, 1]. d_real: ignored. d_fake: ignored. Returns: A tuple consisting of the discriminator loss, discriminator's loss on the real samples and fake samples, and the generator's loss. 
""" with tf.name_scope("hinge_loss"): check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits) d_loss_real = tf.reduce_mean(tf.nn.relu(1.0 - d_real_logits)) d_loss_fake = tf.reduce_mean(tf.nn.relu(1.0 + d_fake_logits)) d_loss = d_loss_real + d_loss_fake g_loss = - tf.reduce_mean(d_fake_logits) return d_loss, d_loss_real, d_loss_fake, g_loss @gin.configurable("loss", whitelist=["fn"]) def get_losses(fn=non_saturating, **kwargs): """Returns the losses for the discriminator and generator.""" return utils.call_with_accepted_args(fn, **kwargs)
[ "frl1996@mail.ustc.edu.cn" ]
frl1996@mail.ustc.edu.cn
7b59590b96288ab93d3835dd03615e1183066465
ab00b17a719d02ef7eea2189b052787b5dc3f3e1
/jsk_arc2017_common/scripts/install_data.py
a29ba3942b2439d13ed1730325a124d43ba226ed
[ "BSD-3-Clause", "BSD-2-Clause" ]
permissive
708yamaguchi/jsk_apc
aa826cd991dbde43327887d03f686d278e37b8f1
b5d0c08f9a8628237b2c7fcf1d4fb60a632d99cb
refs/heads/master
2021-01-17T21:15:56.575294
2017-06-30T04:24:47
2017-06-30T05:41:40
84,169,587
0
0
null
2017-07-23T07:38:15
2017-03-07T07:39:52
Common Lisp
UTF-8
Python
false
false
1,190
py
#!/usr/bin/env python import multiprocessing import jsk_data PKG = 'jsk_arc2017_common' def download_data(path, url, md5): p = multiprocessing.Process( target=jsk_data.download_data, kwargs=dict( pkg_name=PKG, path=path, url=url, md5=md5, ), ) p.start() def main(): # dataset: v1 # augmentation: standard download_data( path='data/models/fcn32s_arc2017_dataset_v1_20170326_005.pth', url='https://drive.google.com/uc?id=0B9P1L--7Wd2vT1pnWnVsNERHTVk', md5='ae9d13c126389bd63bccf0db1551f31e', ) # dataset: v1 # augmentation: stack download_data( path='data/models/fcn32s_arc2017_dataset_v1_20170417.pth', url='https://drive.google.com/uc?id=0B9P1L--7Wd2vYWloN0FGeEhlcGs', md5='a098399a456de29ef8d4feaa8ae795e9', ) # dataset: v2 # augmentation: stack download_data( path='data/models/fcn32s_arc2017_datasetv2_cfg003_20170612.npz', url='https://drive.google.com/uc?id=0B9P1L--7Wd2vS1VaWWVFNDVFQ1k', md5='e4e07b66ebeaf6b33a79eb1b605ee3a3', ) if __name__ == '__main__': main()
[ "www.kentaro.wada@gmail.com" ]
www.kentaro.wada@gmail.com
85d948583795cd31b49710278b255196b01efc9a
73330107dd79b9973e7fbcd9aeda8039690139c6
/Word Break.py
46e17ae0c2f22f0f9bdf0737cf3597433dfd68e4
[]
no_license
nithinveer/leetcode-solutions
2f908cd204c130034def8934d41ef6869029a403
196e58cd38db846653fb074cfd0363997121a7cf
refs/heads/master
2021-06-25T22:28:50.391671
2021-04-20T07:04:19
2021-04-20T07:04:19
219,891,708
0
2
null
null
null
null
UTF-8
Python
false
false
1,862
py
class Solution(object): def __init__(self): self.found = False self.memo = {} def dfs(self, tmp, idx): # print(tmp,idx) #base case if tmp in self.wordDict and idx == len(self.s)-1: print(tmp, idx) self.found = True return True elif idx == len(self.s)-1: return False if tmp+"#"+str(idx) not in self.memo: a = False if tmp in self.wordDict: print(tmp, idx) a = self.dfs(self.s[idx+1], idx+1) b = self.dfs(tmp+self.s[idx+1], idx+1) self.memo[tmp+"#"+str(idx)] = a or b return self.memo[tmp+"#"+str(idx)] # return def wordBreak(self, s, wordDict): """ :type s: str :type wordDict: List[str] :rtype: bool """ self.wordDict = wordDict self.s = s self.dfs(s[0],0) return self.found def wordBreakOld(self, s, wordDict): """ :type s: str :type wordDict: List[str] :rtype: bool """ dp = [False for i in range(len(s) + 1)] dp[0] = True for i in range(1, len(s) + 1): for word in wordDict: if dp[i - len(word)] and s[i - len(word):i] == word: dp[i] = True return dp[-1] sol = Solution() s = "acaaaaabbbdbcccdcdaadcdccacbcccabbbbcdaaaaaadb" wordDict = ["abbcbda","cbdaaa","b","dadaaad","dccbbbc","dccadd","ccbdbc","bbca","bacbcdd","a","bacb","cbc","adc","c","cbdbcad","cdbab","db","abbcdbd","bcb","bbdab","aa","bcadb","bacbcb","ca","dbdabdb","ccd","acbb","bdc","acbccd","d","cccdcda","dcbd","cbccacd","ac","cca","aaddc","dccac","ccdc","bbbbcda","ba","adbcadb","dca","abd","bdbb","ddadbad","badb","ab","aaaaa","acba","abbb"] print(sol.wordBreak(s, wordDict))
[ "nithinveer@iitj.ac.in" ]
nithinveer@iitj.ac.in
76c2bec4f5e99360815ec228c4951542e0b41029
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/CodeJamCrawler/CJ/16_0_1_AndriyM_problem_a.py
4457b1d99d97d7ebbca30cc6d77b89935bc71794
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405091
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
689
py
def digits(value):
    """Return the set of decimal digits appearing in ``value``."""
    return {int(ch) for ch in str(value)}


def last_seen(n):
    """Google Code Jam 2016 Qual A ("Counting Sheep").

    Return the last multiple of ``n`` (as a string) after which every
    decimal digit 0-9 has been seen, or ``'INSOMNIA'`` if 1000 multiples
    are not enough (n == 0 never makes progress).
    """
    seen = digits(n)
    multiplier = 1
    while len(seen) < 10 and multiplier < 1000:
        multiplier += 1
        seen |= digits(n * multiplier)
    return str(n * multiplier) if len(seen) == 10 else 'INSOMNIA'


if __name__ == '__main__':
    with open('A-large.in', 'r') as inp:
        rows = inp.readlines()
    case_count = int(rows[0])
    with open('A-large.out', 'w') as out:
        for case_no in range(1, case_count + 1):
            out.write('Case #%d: %s\n' % (case_no, last_seen(int(rows[case_no]))))
[ "[dhuo@tcd.ie]" ]
[dhuo@tcd.ie]
511a86f6ac8020ec418a0b6a7242cf42893a18d8
fe2ac50a9b03ae6b43ee12676799a3ae51495310
/venv_coupe/Scripts/pip-script.py
73b270447d1f10e8b0533c078a061ac573264c91
[]
no_license
rvfedorin/CoupeCounter
482faaaaa005b64b26b7939d1b98810f19cb1b6b
86caeaa34dbfe71c7f8b76b8db9ee92e5dd0532e
refs/heads/master
2020-03-30T01:53:16.502119
2018-10-05T09:40:10
2018-10-05T09:40:10
150,600,160
0
0
null
null
null
null
UTF-8
Python
false
false
417
py
#!C:\Users\Wolf\PycharmProjects\coupe_count\venv_coupe\Scripts\python.exe # EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip' __requires__ = 'pip==9.0.3' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('pip==9.0.3', 'console_scripts', 'pip')() )
[ "35657347+rvfedorin@users.noreply.github.com" ]
35657347+rvfedorin@users.noreply.github.com
a167c98b6d931efedb00c817e8c755d196939060
7087a5dd1772c9456f098bc024a894dcaeef5432
/backup/build/new-calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1beta1_mutating_webhook_configuration.py
27c1aca77257c666ab13d4af8626047f70c14a6a
[]
no_license
santhoshchami/kubecctl-python
5be7a5a17cc6f08ec717b3eb1c11719ef7653aba
cd45af465e25b0799d65c573e841e2acb983ee68
refs/heads/master
2021-06-23T11:00:43.615062
2019-07-10T16:57:06
2019-07-10T16:57:06
145,669,246
0
0
null
null
null
null
UTF-8
Python
false
false
6,939
py
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen
    https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.10.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat
import re  # kept: part of the generated module surface


class V1beta1MutatingWebhookConfiguration(object):
    """Swagger/OpenAPI model for a Kubernetes MutatingWebhookConfiguration
    (admissionregistration.k8s.io/v1beta1).

    Originally auto generated by the swagger code generator; edited to drop
    the third-party ``six`` dependency — ``iteritems(d)`` is replaced with
    ``d.items()``, which iterates identically on Python 2 and 3.
    """

    # Maps attribute name -> declared swagger type; to_dict() iterates this.
    swagger_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'webhooks': 'list[V1beta1Webhook]'
    }

    # Maps attribute name -> JSON key used in the OpenAPI definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'webhooks': 'webhooks'
    }

    def __init__(self, api_version=None, kind=None, metadata=None, webhooks=None):
        """V1beta1MutatingWebhookConfiguration - a model defined in Swagger.

        All fields are optional; ``None`` means "not set" and is preserved
        as-is by :meth:`to_dict`.
        """
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._webhooks = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if webhooks is not None:
            self.webhooks = webhooks

    @property
    def api_version(self):
        """str: versioned schema of this representation of an object.

        More info:
        https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1beta1MutatingWebhookConfiguration."""
        self._api_version = api_version

    @property
    def kind(self):
        """str: REST resource kind this object represents, in CamelCase.

        More info:
        https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1beta1MutatingWebhookConfiguration."""
        self._kind = kind

    @property
    def metadata(self):
        """V1ObjectMeta: standard object metadata.

        More info:
        https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1beta1MutatingWebhookConfiguration."""
        self._metadata = metadata

    @property
    def webhooks(self):
        """list[V1beta1Webhook]: webhooks and the affected resources and
        operations."""
        return self._webhooks

    @webhooks.setter
    def webhooks(self, webhooks):
        """Sets the webhooks of this V1beta1MutatingWebhookConfiguration."""
        self._webhooks = webhooks

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models, lists and dicts (anything exposing ``to_dict``)."""
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when ``other`` is the same model type with equal fields."""
        if not isinstance(other, V1beta1MutatingWebhookConfiguration):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__`` (required explicitly on Python 2)."""
        return not self == other
[ "root@kube-node02.local" ]
root@kube-node02.local
c5d18fc4ec58ba588b22d10547e718a9c6c64f06
9644567f9cd3415e6c8b1470fde72ab178bb8eb0
/flask/lib/python2.7/site-packages/flask_ponywhoosh/views.py
45f5a9aeb475a8bb6957dc593b119433011ad8a6
[ "Apache-2.0" ]
permissive
Ahmad31/Web_Flask_Cassandra
01d44ee03fcb457ea3a01629f6fd29870663b8ff
76acb074fce521e904f3b2a41e6ab69571f4369e
refs/heads/master
2021-06-10T02:42:53.494515
2019-11-27T16:22:48
2019-11-27T16:22:48
88,625,344
3
1
Apache-2.0
2021-03-19T22:23:05
2017-04-18T12:59:21
Python
UTF-8
Python
false
false
3,007
py
'''
flask_ponywhoosh.views module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Perform full-text searches over your database with Pony ORM and PonyWhoosh,
for flask applications.

NOTE: this module uses Python 2 ``print`` statements and will not run on
Python 3 as-is.

:copyright: (c) 2015-2017 by Jonathan Prieto-Cubides & Felipe Rodriguez.
:license: MIT (see LICENSE.md)
'''

import re
from pprint import pprint

from flask import render_template
from flask.views import View

from form import SearchForm


class IndexView(View):
    """Pluggable Flask view serving the search form and its results.

    Renders ``ponywhoosh/index.html`` with the form on GET (or invalid
    POST) and ``ponywhoosh/results.html`` after a valid submission.
    """

    # HTTP methods this view accepts.
    methods = ['POST', 'GET']

    def __init__(self, pw, action_url_form):
        # pw: the PonyWhoosh instance that owns the indexes and performs
        # the actual search; action_url_form: URL the form posts back to.
        self._pw = pw
        self.debug = self._pw.debug
        self.action_url_form = action_url_form

    def dispatch_request(self):
        """Handle one request: validate the form, run the search, render.

        On a valid submit the comma/space-separated ``fields``, ``models``
        and ``except_field`` inputs are split into lists and handed to
        ``self._pw.search``; otherwise the empty search page is shown.
        """
        # NOTE(review): ctx, except_field and wildcards are assigned but
        # never used afterwards - they look like leftovers.
        ctx = {'form' : SearchForm()}
        except_field = None
        query, fields = None, None
        wildcards = True
        form = SearchForm()

        if self.debug:
            print 'form:'
            pprint(form.data)

        if form.validate_on_submit():
            add_wildcards = form.add_wildcards.data
            # Split the free-text inputs on any non-word characters.
            except_fields = re.split('\W+', form.except_field.data, flags=re.UNICODE)
            fields = re.split('\W+', form.fields.data, flags=re.UNICODE)
            models = re.split('\W+', form.models.data, flags=re.UNICODE)
            query = form.query.data
            something = form.something.data

            results = self._pw.search(
                query
                , add_wildcards=add_wildcards
                , something=something
                , include_entity=True
                , fields=fields
                , models=models
                , except_fields=except_fields
                , use_dict=False
            )

            if self.debug:
                print 'form = ',
                pprint({
                    'query': query
                    , 'add_wildcards': add_wildcards
                    , 'something': something
                    , 'include_entity': True
                    , 'fields': fields
                    , 'models': models
                    , 'except_fields': except_fields
                })
                print "results = "
                pprint(results)

            # Valid submit: render the results page.
            return render_template(
                'ponywhoosh/results.html'
                , entidades=list(self._pw._entities.keys())
                , action_url_form=self.action_url_form
                , form=form
                , results=results
                , n=results['cant_results']
                , labels=results['results'].keys()
            )

        # GET or invalid POST: render the bare search form.
        return render_template(
            'ponywhoosh/index.html'
            , form=form
            , action_url_form=self.action_url_form
            , query=query
        )
[ "aku.anwar.aan@gmail.com" ]
aku.anwar.aan@gmail.com
4d75e4757f1aec208f56653737558c8d8f20f81c
9a50339b63586a405d16acf732f877d17742f45b
/phase/migrations/0003_phasesubcategory_category.py
488d0763a9d715cb7a1161ce37f23b6542996d66
[]
no_license
ahsanhabib98/PPPsPerformance
777798caa1e41fbcf9d084b8166cae2e9628b3ef
1a9af5eb447cac81f9dc929a74e3fddf21d87588
refs/heads/master
2020-04-22T23:35:52.842941
2019-02-14T19:51:03
2019-02-14T19:51:03
170,747,015
0
0
null
null
null
null
UTF-8
Python
false
false
508
py
# Generated by Django 2.0.5 on 2019-01-29 15:10 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('phase', '0002_auto_20190129_2108'), ] operations = [ migrations.AddField( model_name='phasesubcategory', name='category', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='phase.PhaseCategory'), ), ]
[ "ahredoan@gmail.com" ]
ahredoan@gmail.com
75a1839ac3f3ee5ea8197ccce4cc2e3fc8b2821b
1462c42bef31c022040b4cae73a96d852e857b51
/loaner/web_app/backend/api/shelf_api.py
de3b892d75dd8ee46ea078402abd727c6ff6615c
[ "Apache-2.0" ]
permissive
Getechsupport/getechgraband-go
f5e54754a2f300bfd2c8be54edd386841c5fae6c
6d2a040ef9617fabd8c691ec1c787cf5ec9edb73
refs/heads/master
2022-08-17T12:11:06.299622
2020-03-10T16:22:34
2020-03-10T16:22:34
243,598,161
0
0
Apache-2.0
2022-07-07T17:22:55
2020-02-27T19:21:54
Python
UTF-8
Python
false
false
8,087
py
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The entry point for the Shelf methods."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import logging

from protorpc import message_types

from google.appengine.api import datastore_errors

import endpoints

from loaner.web_app.backend.api import auth
from loaner.web_app.backend.api import permissions
from loaner.web_app.backend.api import root_api
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.lib import api_utils
from loaner.web_app.backend.lib import search_utils
from loaner.web_app.backend.lib import user
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model

# Error strings surfaced to API callers via endpoints exceptions.
_SHELF_DOES_NOT_EXIST_MSG = (
    'The shelf with location: %s does not exist. Please double '
    'check the location.')
_DEVICE_DOES_NOT_EXIST_MSG = (
    'The device_identifier: %s is either not enrolled or an invalid serial '
    'number has been entered.')


@root_api.ROOT_API.api_class(resource_name='shelf', path='shelf')
class ShelfApi(root_api.Service):
  """This class is for the Shelf API."""

  @auth.method(
      shelf_messages.EnrollShelfRequest,
      message_types.VoidMessage,
      name='enroll',
      path='enroll',
      http_method='POST',
      permission=permissions.Permissions.MODIFY_SHELF)
  def enroll(self, request):
    """Enrolls a shelf in the program."""
    user_email = user.get_user_email()
    # Every mutating method validates the XSRF token before acting.
    self.check_xsrf_token(self.request_state)
    try:
      shelf_model.Shelf.enroll(
          user_email=user_email,
          friendly_name=request.friendly_name,
          location=request.location,
          latitude=request.latitude,
          longitude=request.longitude,
          altitude=request.altitude,
          capacity=request.capacity,
          audit_notification_enabled=request.audit_notification_enabled,
          responsible_for_audit=request.responsible_for_audit,
          audit_interval_override=request.audit_interval_override,
      )
    except (shelf_model.EnrollmentError, datastore_errors.BadValueError) as err:
      # Model/validation failures become 400s for the API caller.
      raise endpoints.BadRequestException(str(err))
    return message_types.VoidMessage()

  @auth.method(
      shelf_messages.ShelfRequest,
      shelf_messages.Shelf,
      name='get',
      path='get',
      http_method='POST',
      permission=permissions.Permissions.READ_SHELVES)
  def get(self, request):
    """Gets a shelf based on location."""
    self.check_xsrf_token(self.request_state)
    return api_utils.build_shelf_message_from_model(get_shelf(request))

  @auth.method(
      shelf_messages.ShelfRequest,
      message_types.VoidMessage,
      name='disable',
      path='disable',
      http_method='POST',
      permission=permissions.Permissions.MODIFY_SHELF)
  def disable(self, request):
    """Disables a shelf by its location."""
    self.check_xsrf_token(self.request_state)
    user_email = user.get_user_email()
    shelf = get_shelf(request)
    shelf.disable(user_email)
    return message_types.VoidMessage()

  @auth.method(
      shelf_messages.UpdateShelfRequest,
      message_types.VoidMessage,
      name='update',
      path='update',
      http_method='POST',
      permission=permissions.Permissions.MODIFY_SHELF)
  def update(self, request):
    """Gets a shelf using location to update its properties."""
    self.check_xsrf_token(self.request_state)
    user_email = user.get_user_email()
    shelf = get_shelf(request.shelf_request)
    # Only the fields present on the request are forwarded to edit().
    kwargs = api_utils.to_dict(request, shelf_model.Shelf)
    shelf.edit(user_email=user_email, **kwargs)
    return message_types.VoidMessage()

  @auth.method(
      shelf_messages.Shelf,
      shelf_messages.ListShelfResponse,
      name='list',
      path='list',
      http_method='POST',
      permission=permissions.Permissions.READ_SHELVES)
  def list_shelves(self, request):
    """Lists enabled or all shelves based on any shelf attribute."""
    self.check_xsrf_token(self.request_state)
    if request.page_size <= 0:
      raise endpoints.BadRequestException(
          'The value for page_size must be greater than 0.')
    # An explicit query string on the request wins; otherwise one is built
    # from the populated shelf attributes.
    query, sort_options, returned_fields = (
        search_utils.set_search_query_options(request.query))
    if not query:
      query = search_utils.to_query(request, shelf_model.Shelf)

    offset = search_utils.calculate_page_offset(
        page_size=request.page_size, page_number=request.page_number)

    search_results = shelf_model.Shelf.search(
        query_string=query, query_limit=request.page_size,
        offset=offset, sort_options=sort_options,
        returned_fields=returned_fields)
    total_pages = search_utils.calculate_total_pages(
        page_size=request.page_size, total_results=search_results.number_found)

    shelves_messages = []
    for document in search_results.results:
      message = search_utils.document_to_message(
          document, shelf_messages.Shelf())
      # Attach a ShelfRequest so clients can address this shelf directly.
      message.shelf_request = shelf_messages.ShelfRequest()
      message.shelf_request.urlsafe_key = document.doc_id
      message.shelf_request.location = message.location
      shelves_messages.append(message)

    return shelf_messages.ListShelfResponse(
        shelves=shelves_messages,
        total_results=search_results.number_found,
        total_pages=total_pages)

  @auth.method(
      shelf_messages.ShelfAuditRequest,
      message_types.VoidMessage,
      name='audit',
      path='audit',
      http_method='POST',
      permission=permissions.Permissions.AUDIT_SHELF)
  def audit(self, request):
    """Performs an audit on a shelf based on location."""
    self.check_xsrf_token(self.request_state)
    shelf = get_shelf(request.shelf_request)
    user_email = user.get_user_email()
    devices_on_shelf = []
    # Devices the search index currently believes are on this shelf.
    shelf_string_query = 'shelf: {}'.format(shelf.key.urlsafe())
    devices_retrieved_on_shelf = device_model.Device.search(shelf_string_query)
    for device_identifier in request.device_identifiers:
      device = device_model.Device.get(identifier=device_identifier)
      if not device:
        raise endpoints.NotFoundException(
            _DEVICE_DOES_NOT_EXIST_MSG % device_identifier)
      if device.shelf:
        if device.shelf == shelf.key:
          # NOTE(review): this branch appends the urlsafe string while the
          # branch below appends the ndb key, and the removal loop compares
          # doc_id against this mixed list - confirm the intended key type.
          devices_on_shelf.append(device.key.urlsafe())
          logging.info('Device %s is already on shelf.', device.identifier)
          continue
      try:
        device.move_to_shelf(shelf=shelf, user_email=user_email)
        devices_on_shelf.append(device.key)
      except device_model.UnableToMoveToShelfError as err:
        raise endpoints.BadRequestException(str(err))
    # Any device the index listed on this shelf but that was not scanned in
    # this audit gets removed from the shelf.
    for device in devices_retrieved_on_shelf.results:
      if device.doc_id not in devices_on_shelf:
        api_utils.get_ndb_key(device.doc_id).get().remove_from_shelf(
            shelf=shelf, user_email=user_email)
    shelf.audit(user_email=user_email, num_of_devices=len(devices_on_shelf))
    return message_types.VoidMessage()


def get_shelf(request):
  """Gets a shelf using the location.

  Args:
    request: shelf_messages.ShelfRequest, the request message for a shelf.

  Returns:
    Shelf object.

  Raises:
    endpoints.NotFoundException when a shelf can not be found.
  """
  # Prefer the urlsafe datastore key when the client supplied one; fall
  # back to a lookup by location.
  if request.urlsafe_key:
    shelf = api_utils.get_ndb_key(request.urlsafe_key).get()
  else:
    shelf = shelf_model.Shelf.get(location=request.location)
  if not shelf:
    raise endpoints.NotFoundException(
        _SHELF_DOES_NOT_EXIST_MSG % request.location)
  return shelf
[ "email" ]
email
2e257e337febe1511c2d9627bf695efa744fc08f
b3e9a8963b9aca334b93b95bc340c379544e1046
/euler/53.py
848c9371fe27dc537186b3b102125dfaaa2271db
[]
no_license
protocol7/euler.py
86ea512c2c216968e6c260b19469c0c8d038feb7
e2a8e46a9b07e6d0b039a5496059f3bf73aa5441
refs/heads/master
2022-09-08T22:49:47.486631
2022-08-23T20:07:00
2022-08-23T20:07:00
169,478,759
0
0
null
null
null
null
UTF-8
Python
false
false
481
py
#!/usr/bin/env python3
from functools import lru_cache


@lru_cache(maxsize=None)
def factorial(n):
    """Return n! (memoized).

    Base case is ``n <= 1`` so that factorial(0) == 1; the original
    ``n == 1`` base case recursed forever for n == 0, which made
    combos(n, r) crash whenever r == 0 or r == n.
    """
    if n <= 1:
        return 1
    return n * factorial(n - 1)

assert 24 == factorial(4)


def combos(n, r):
    """Return C(n, r) = n! / (r! * (n-r)!) as an exact integer.

    Floor division is exact here (binomial coefficients are integers) and
    avoids the float rounding/overflow the original true division risked
    for large n.
    """
    return factorial(n) // (factorial(r) * factorial(n - r))

assert 1144066 == combos(23, 10)


def find():
    """Project Euler 53: count the C(n, r) values, 1 <= n <= 100,
    that exceed one million."""
    c = 0
    for n in range(1, 101):
        for r in range(1, n):
            if combos(n, r) > 1000000:
                c += 1
    return c

assert 4075 == find()
[ "niklas@protocol7.com" ]
niklas@protocol7.com
258d8ef0453118d55ac07e5b1516bb5578fe9f11
646cadb1c72ef4a060343baf2fcbe271958b6878
/tigerjython/Pi2GoEx/Ligth1a.py
36d10f7d5715c2c72b027fbe61929c76909918e4
[]
no_license
tigerjython/tjinstall
bd75cf8e4ae27b639a13865ef1ec5710391a2938
aab61519b5299c2ab4f423c6fc5d8ea7c7860a99
refs/heads/master
2021-01-17T08:53:50.386905
2018-01-12T06:56:28
2018-01-12T06:56:28
40,659,466
0
0
null
null
null
null
UTF-8
Python
false
false
281
py
# Light1a.py from raspibrick import * def onDark(value): print "dark event with v:", value def onBright(value): print "bright event with v:", value robot = Robot() ls = LightSensor(LS_FRONT_LEFT) while not robot.isEscapeHit(): continue robot.exit() print "All done"
[ "support@tigerjython.com" ]
support@tigerjython.com
a51bcd3afd92f603fd92b6ce8517474dbb4ae72b
d741f71eb48b23fdda1339daee10ccb039da1ee6
/leetcode7.py
f66dd37a370eca0129bd9f10c89bc749bd2596fd
[]
no_license
HawkinYap/Leetcode
a5b42bf3aa41e7f1ba60a5a804909035fa8e1ec9
5e7bc7368db4dfcd8597dc0462a8a0b5bfd46e54
refs/heads/master
2020-06-30T07:15:24.453955
2020-04-18T07:38:22
2020-04-18T07:38:22
199,663,089
0
0
null
null
null
null
UTF-8
Python
false
false
577
py
class Solution:
    """Reverse Integer (LeetCode 7)."""

    # Signed 32-bit range mandated by the problem statement.
    _INT_MIN = -2 ** 31       # -2147483648
    _INT_MAX = 2 ** 31 - 1    #  2147483647

    def reverse(self, x):
        """Reverse the decimal digits of ``x``, preserving the sign.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range.  The original compared the magnitude with ``< 2**31 - 1``,
        which wrongly rejected a result equal to 2147483647 and every valid
        negative result below -(2**31 - 1), including -2**31 itself.

        :type x: int
        :rtype: int
        """
        sign = -1 if x < 0 else 1
        # abs() drops the '-' so the slice reversal only sees digits.
        reversed_value = sign * int(str(abs(x))[::-1])
        if Solution._INT_MIN <= reversed_value <= Solution._INT_MAX:
            return reversed_value
        return 0


if __name__ == '__main__':
    num = 123
    solution = Solution()
    print(solution.reverse(num))
[ "Gallowsgogo@gmail.com" ]
Gallowsgogo@gmail.com
a0babf5d131fe0f8d29fd070ae89e73f9667fea3
9e4f5cf305e818caccb543b9b0c60876afc682c2
/Python-Matic-SDK/examples/deploy.py
7f36ee16da523d8d51697000b451f539e59d4dba
[ "MIT" ]
permissive
bellyfat/Matic-for-python-developers
1ed2062e1826d7847ade449d47a311901c1b53dd
ea33737a89384ce9e6f6dc97b12f0a608994e28d
refs/heads/main
2023-07-16T04:48:19.934602
2021-09-12T15:21:09
2021-09-12T15:21:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
293
py
from maticvigil.EVCore import EVCore

# Deploy the Microblog contract (microblog.sol) through the MaticVigil API
# and print the address it was deployed at.
evc = EVCore(verbose=False)
# inputs are the constructor arguments: Microblog(_ownerName, _blogTitle).
r = evc.deploy(
    contract_file='microblog.sol',
    contract_name='Microblog',
    inputs={
        '_ownerName': 'anomit',
        '_blogTitle': 'TheBlog'
    }
)
print('Contract Address deployed at')
# NOTE(review): assumes the response dict always carries a 'contract' key -
# a failed deploy would raise KeyError here; confirm against EVCore.deploy.
print(r['contract'])
[ "noreply@github.com" ]
bellyfat.noreply@github.com
cc01ee431f998edf052d5fb58191cf1d5e445aa9
aa91f6e8d59286f65e7f6ed065823c80b7694439
/scripts/lab/fasttext/f1.py
80a8f85cdbe7ed8c70a4524cd5fa968237443bd0
[ "MIT" ]
permissive
davidyuqiwei/davidyu_stock
7f93bcc2c50a0e2c356e3b517dbf7e2e7467093f
c234911f49d5980f2dff651333f8ca957333e094
refs/heads/master
2023-01-07T02:08:04.330559
2023-01-02T16:31:37
2023-01-02T16:31:37
163,694,812
13
2
null
2022-12-18T03:55:10
2018-12-31T20:07:30
Jupyter Notebook
UTF-8
Python
false
false
8,678
py
# _*_ coding:utf-8 _*_
'''
@Author: Ruan Yang
@Date: 2018.12.9
@Purpose: Binary classification on wikizh text - decide whether a sentence
          reads fluently.
@Attention: The negative samples are shuffled positive samples, so the two
            classes are easy to separate.
@Algorithm: CNN (binary classification example).
'''

# NOTE(review): the second triple-quote ( ''' ) further below is never
# closed in this file, so everything from it to the end of the file is an
# unterminated string literal and the module raises a SyntaxError if
# executed as-is. TODO: confirm against the original repository.

import codecs

# NOTE(review): paths has no trailing '/', so paths+train_data_name is
# ".../fastText-Studytrain.txt" - verify that is the intended filename.
paths=r"/home/davidyu/gits/fastText-Study"
train_data_name="train.txt"
test_data_name="test.txt"

x_train=[]
x_test=[]
y_train=[]
y_test=[]

x_train_positive=0
x_train_negative=0
x_test_positive=0
x_test_negative=0

# Each line is "<label>\t<text>"; __label__1 marks a fluent sentence.
with codecs.open(paths+train_data_name,"r","utf-8") as f1,\
     codecs.open(paths+test_data_name,"r","utf-8") as f2:
    for line in f1:
        words=line.strip().split("\t")
        if words[0] == "__label__1":
            y_train.append([0,1]) # [0,1] marks a positive sample
            x_train_positive += 1
        else:
            y_train.append([1,0]) # [1,0] marks a negative sample
            x_train_negative += 1
        x_train.append(words[1])
    for line in f2:
        words=line.strip().split("\t")
        if words[0] == "__label__1":
            y_test.append([0,1])
            x_test_positive += 1
        else:
            y_test.append([1,0])
            x_test_negative += 1
        x_test.append(words[1])

print("#----------------------------------------------------------#")
print("训练集总数:{}".format(len(x_train)))
print("训练集中正样本个数:{}".format(x_train_positive))
print("训练集中负样本个数:{}".format(x_train_negative))
print("测试集总数:{}".format(len(x_test)))
print("测试集中正样本个数:{}".format(x_test_positive))
print("测试集中负样本个数:{}".format(x_test_negative))
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("将输入文本转换成 index - word 对应关系,并输出词汇表")

x_text=x_train+x_test # full input corpus (train + test)
y_labels=y_train+y_test

'''
from tensorflow.contrib import learn
import tensorflow as tf
import numpy as np
import collections

max_document_length=200
min_frequency=1

vocab = learn.preprocessing.VocabularyProcessor(max_document_length,min_frequency, tokenizer_fn=list)

x = np.array(list(vocab.fit_transform(x_text)))

vocab_dict = collections.OrderedDict(vocab.vocabulary_._mapping)

with codecs.open(r"C:\Users\RY\Desktop\vocabulary.txt","w","utf-8") as f:
    for key,value in vocab_dict.items():
        f.write("{} {}\n".format(key,value))

print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("数据混洗")

np.random.seed(10)
y=np.array(y_labels)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

test_sample_percentage=0.2
test_sample_index = -1 * int(test_sample_percentage * float(len(y)))
x_train, x_test = x_shuffled[:test_sample_index], x_shuffled[test_sample_index:]
y_train, y_test = y_shuffled[:test_sample_index], y_shuffled[test_sample_index:]
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("读取預训练词向量矩阵")
pretrainpath=r"E:\中科大MS\預训练模型\\"

embedding_index={}

with codecs.open(pretrainpath+"sgns.wiki.word","r","utf-8") as f:
    #for line in f:
    #    if len(line.strip().split(" "))==2:
    #        nwords=int(line.strip().split(" ")[0])
    #        ndims=int(line.strip().split(" ")[1])
    #    else:
    #        values=line.split()
    #        words=values[0]
    #        coefs=np.asarray(values[1:],dtype="float32")
    #        embedding_index[word]=coefs
    line=f.readline()
    nwords=int(line.strip().split(" ")[0])
    ndims=int(line.strip().split(" ")[1])
    for line in f:
        values=line.split()
        words=values[0]
        coefs=np.asarray(values[1:],dtype="float32")
        embedding_index[words]=coefs

print("預训练模型中Token总数:{} = {}".format(nwords,len(embedding_index)))
print("預训练模型的维度:{}".format(ndims))
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("将vocabulary中的 index-word 对应关系映射到 index-word vector形式")

embedding_matrix=[]
notfoundword=0

for word in vocab_dict.keys():
    if word in embedding_index.keys():
        embedding_matrix.append(embedding_index[word])
    else:
        notfoundword += 1
        embedding_matrix.append(np.random.uniform(-1,1,size=ndims))
embedding_matrix=np.array(embedding_matrix,dtype=np.float32) # 必须使用 np.float32

print("词汇表中未找到单词个数:{}".format(notfoundword))
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("构建CNN模型.................")
print("Embedding layer --- Conv1D layer --- Dense layer --- Dense layer")

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D

max_sentence_length=200
embedding_dims=ndims
input_length=max_sentence_length
batch_size = 64
filters = 250
kernel_size = 3
hidden_dims = 250
dropout=0.5
num_classes=2
epochs = 2

model = Sequential()
model.add(Embedding(len(vocab_dict), embedding_dims, weights=[embedding_matrix], input_length=max_sentence_length, trainable=False))
model.add(Dropout(dropout))
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Dropout(dropout))
model.add(Activation('relu'))
model.add(Dense(num_classes))
model.add(Activation('sigmoid'))
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("编译模型")
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("模型拟合")
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
print("#----------------------------------------------------------#")
print("\n")

print("#----------------------------------------------------------#")
print("模型评估")

# 训练得分和准确度
score,acc=model.evaluate(x_test,y_test,batch_size=batch_size)
print("#---------------------------------------------------#")
print("预测得分:{}".format(score))
print("预测准确率:{}".format(acc))
print("#---------------------------------------------------#")
print("\n")

# 模型预测
predictions=model.predict(x_test)
print("#---------------------------------------------------#")
print("测试集的预测结果,对每个类有一个得分/概率,取值大对应的类别")
print(predictions)
print("#---------------------------------------------------#")
print("\n")

# 模型预测类别
predict_class=model.predict_classes(x_test)
print("#---------------------------------------------------#")
print("测试集的预测类别")
print(predict_class)
print("#---------------------------------------------------#")
print("\n")

# 模型保存
model.save(r"C:\Users\RY\Desktop\wikizh_cnn.h5")
print("#---------------------------------------------------#")
print("保存模型")
print("#---------------------------------------------------#")
print("\n")

# 模型总结
print("#---------------------------------------------------#")
print("输出模型总结")
print(model.summary())
print("#---------------------------------------------------#")
print("\n")

# 模型的配置文件
config=model.get_config()
print("#---------------------------------------------------#")
print("输出模型配置信息")
print(config)
print("#---------------------------------------------------#")
print("\n")
[ "davidyuqiwei@outlook.com" ]
davidyuqiwei@outlook.com
a05d56db574368bf8580a4bd6667b10b1c1cae4d
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
/benchmark/smsdroid/testcase/firstcases/testcase7_000.py
9a66eb49f61b413902721cb5c99ff03f5fba9cc3
[]
no_license
Prefest2018/Prefest
c374d0441d714fb90fca40226fe2875b41cf37fc
ac236987512889e822ea6686c5d2e5b66b295648
refs/heads/master
2021-12-09T19:36:24.554864
2021-12-06T12:46:14
2021-12-06T12:46:14
173,225,161
5
0
null
null
null
null
UTF-8
Python
false
false
7,530
py
#coding=utf-8 import os import subprocess import time import traceback from appium import webdriver from appium.webdriver.common.touch_action import TouchAction from selenium.common.exceptions import NoSuchElementException, WebDriverException desired_caps = { 'platformName' : 'Android', 'deviceName' : 'Android Emulator', 'platformVersion' : '4.4', 'appPackage' : 'de.ub0r.android.smsdroid', 'appActivity' : 'de.ub0r.android.smsdroid.ConversationListActivity', 'resetKeyboard' : True, 'androidCoverage' : 'de.ub0r.android.smsdroid/de.ub0r.android.smsdroid.JacocoInstrumentation', 'noReset' : True } def command(cmd, timeout=5): p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True) time.sleep(timeout) p.terminate() return def getElememt(driver, str) : for i in range(0, 5, 1): try: element = driver.find_element_by_android_uiautomator(str) except NoSuchElementException: time.sleep(1) else: return element os.popen("adb shell input tap 50 50") element = driver.find_element_by_android_uiautomator(str) return element def getElememtBack(driver, str1, str2) : for i in range(0, 2, 1): try: element = driver.find_element_by_android_uiautomator(str1) except NoSuchElementException: time.sleep(1) else: return element for i in range(0, 5, 1): try: element = driver.find_element_by_android_uiautomator(str2) except NoSuchElementException: time.sleep(1) else: return element os.popen("adb shell input tap 50 50") element = driver.find_element_by_android_uiautomator(str2) return element def swipe(driver, startxper, startyper, endxper, endyper) : size = driver.get_window_size() width = size["width"] height = size["height"] try: driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper), end_y=int(height * endyper), duration=1000) except WebDriverException: time.sleep(1) driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper), end_y=int(height * endyper), duration=1000) 
return def scrollToFindElement(driver, str) : for i in range(0, 5, 1): try: element = driver.find_element_by_android_uiautomator(str) elements = driver.find_elements_by_android_uiautomator(str) if (len(elements) > 1) : for temp in elements : if temp.get_attribute("enabled") == "true" : element = temp break except NoSuchElementException: swipe(driver, 0.5, 0.55, 0.5, 0.2) else : return element for i in range(0, 4, 1): try: element = driver.find_element_by_android_uiautomator(str) elements = driver.find_elements_by_android_uiautomator(str) if (len(elements) > 1): for temp in elements: if temp.get_attribute("enabled") == "true": element = temp break except NoSuchElementException: swipe(driver, 0.5, 0.2, 0.5, 0.55) else : return element return def scrollToClickElement(driver, str) : element = scrollToFindElement(driver, str) if element is None : return else : element.click() def clickInList(driver, str) : element = None if (str is None) : candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView") if len(candidates) >= 1 and checkWindow(driver): element = candidates[len(candidates)-1] else : element = scrollToFindElement(driver, str) if element is not None : element.click() else : if checkWindow(driver) : driver.press_keycode(4) def clickOnCheckable(driver, str, value = "true") : parents = driver.find_elements_by_class_name("android.widget.LinearLayout") for parent in parents: try : parent.find_element_by_android_uiautomator(str) lists = parent.find_elements_by_class_name("android.widget.LinearLayout") if len(lists) == 1 : innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)") nowvalue = innere.get_attribute("checked") if (nowvalue != value) : innere.click() break except NoSuchElementException: continue def typeText(driver, value) : element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")") element.clear() element.send_keys(value) enterelement = getElememt(driver, "new 
UiSelector().text(\"OK\")") if (enterelement is None) : if checkWindow(driver): driver.press_keycode(4) else : enterelement.click() def checkWindow(driver) : dsize = driver.get_window_size() nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size if dsize['height'] > nsize['height']: return True else : return False def testingSeekBar(driver, str, value): try : if(not checkWindow(driver)) : element = seekForNearestSeekBar(driver, str) else : element = driver.find_element_by_class_name("android.widget.SeekBar") if (None != element): settingSeekBar(driver, element, value) driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click() except NoSuchElementException: time.sleep(1) def seekForNearestSeekBar(driver, str): parents = driver.find_elements_by_class_name("android.widget.LinearLayout") for parent in parents: try : parent.find_element_by_android_uiautomator(str) lists = parent.find_elements_by_class_name("android.widget.LinearLayout") if len(lists) == 1 : innere = parent.find_element_by_class_name("android.widget.SeekBar") return innere break except NoSuchElementException: continue def settingSeekBar(driver, element, value) : x = element.rect.get("x") y = element.rect.get("y") width = element.rect.get("width") height = element.rect.get("height") TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform() y = value def clickInMultiList(driver, str) : element = None if (str is None) : candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView") if len(candidates) >= 1 and checkWindow(driver): element = candidates[len(candidates)-1] else : element = scrollToFindElement(driver, str) if element is not None : nowvalue = element.get_attribute("checked") if (nowvalue != "true") : element.click() if checkWindow(driver) : driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click() # testcase7_000 try : starttime = time.time() 
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps) element = getElememtBack(driver, "new UiSelector().text(\"66560866\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)") TouchAction(driver).tap(element).perform() element = getElememtBack(driver, "new UiSelector().text(\"7:52 AM\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)") TouchAction(driver).tap(element).perform() element = getElememtBack(driver, "new UiSelector().text(\"Call 2131231\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)") TouchAction(driver).tap(element).perform() except Exception, e: print 'FAIL' print 'str(e):\t\t', str(e) print 'repr(e):\t', repr(e) print traceback.format_exc() else: print 'OK' finally: cpackage = driver.current_package endtime = time.time() print 'consumed time:', str(endtime - starttime), 's' command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"7_000\"") jacocotime = time.time() print 'jacoco time:', str(jacocotime - endtime), 's' driver.quit() if (cpackage != 'de.ub0r.android.smsdroid'): cpackage = "adb shell am force-stop " + cpackage os.popen(cpackage)
[ "prefest2018@gmail.com" ]
prefest2018@gmail.com
635db05bfab567250fafbb0dbbf2098cf6aebaad
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
/Euler_1_31a.py
92b658671b702b3c61f6fb98880583468f309a1e
[]
no_license
MikeOcc/MyProjectEulerFiles
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
4d066d52380aade215636953589bf56d6b88f745
refs/heads/master
2021-01-16T18:45:44.133229
2015-05-27T18:28:43
2015-05-27T18:28:43
5,876,116
0
0
null
null
null
null
UTF-8
Python
false
false
1,765
py
# # Euler 31 # # from itertools import * #print list(product([200,100,50,20,10,5,2,1] , repeat = 200)) cur = [100, 50, 20, 10, 5, 2, 1] fac = [ 2, 4, 10, 20, 40, 100, 200] val = 200 ctr = 8 cntdown = 8 cnt = 200 for i in range(0,2): cnt-=i*cur[0] print cnt,cur[0] if cnt == 0: cnt = 200 ctr += 1;print ctr break print "Level 1:" for j in range(0,4): if cnt == 0:break print cnt,cur[1] cnt-=j*cur[1] if cnt == 0: cnt = 200 ctr += 1;print ctr break print "Level 2:" for k in range(0,11): if cnt == 0:break cnt-=k*cur[2] print cnt,cur[0] if cnt == 0: cnt = 200 ctr += 1;print ctr break print "Level 3:" for l in range(0,21): if cnt == 0:break cnt-=l*cur[3] if cnt == 0: cnt = 200 ctr += 1;print ctr break print "Level 4:" for m in range(1,41): if cnt == 0:break cnt-=m*cur[4] print cnt,cur[0] if cnt == 0: ctr += 1;print ctr break print "Level 5:" for n in range(0,101): if cnt == 0:break cnt-=n*cur[5] print cnt,cur[5] if cnt == 0: cnt = 200 ctr += 1;print ctr break print "Level 6:" for o in range(0,201): if cnt == 0:break cnt-=o*cur[6] print cnt,cur[6] if cnt == 0: cnt = 200 ctr += 1;print ctr break print "total number of permutations is " , ctr
[ "mike.occhipinti@mlsassistant.com" ]
mike.occhipinti@mlsassistant.com
64900c6259cb79e029298d9d679b01edffaba0e0
66b13f9aa90bb0091684334c68a574c390fb58a1
/gen-cards.py
2f95a50ff3eee68a49acce58d153ab71aa31fb67
[ "MIT" ]
permissive
sugar-activities/4246-activity
d67c8038698b4fd5dab19ff9bf9ea2c3647692e8
5ddd5fe21b2ddfc5f926e434b7f8a23e20f54263
refs/heads/master
2021-01-19T23:14:32.545612
2017-04-21T05:05:53
2017-04-21T05:05:53
88,937,416
0
0
null
null
null
null
UTF-8
Python
false
false
6,603
py
# -*- coding: utf-8 -*- #Copyright (c) 2009, Walter Bender #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. 
RED_STROKE = "#FF6040" RED_FILL = "#FFC4B8" BLUE_STROKE = "#0060C8" BLUE_FILL = "#ACC8E4" GREEN_STROKE = "#00B418" GREEN_FILL = "#AFE8A8" PURPLE_STROKE = "#780078" PURPLE_FILL = "#E4AAE4" color_pairs = ([RED_STROKE,RED_FILL], [GREEN_STROKE,GREEN_FILL], [BLUE_STROKE,BLUE_FILL], [PURPLE_STROKE,PURPLE_FILL]) fill_styles = ("none","gradient","solid") card_types = ("X","O","C") def background(f,fill): f.write("<rect width=\"74.5\" height=\"124.5\" rx=\"11\" ry=\"9\" x=\"0.25\" y=\"0.25\"\n") f.write("style=\"fill:" + fill + ";fill-opacity:1;stroke:#000000;stroke-width:0.5\" />\n") def header(f,fill): f.write("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n") f.write("<!-- Created with Emacs -->\n") f.write("<svg\n") f.write(" xmlns:svg=\"http://www.w3.org/2000/svg\"\n") f.write(" xmlns=\"http://www.w3.org/2000/svg\"\n") f.write(" version=\"1.0\"\n") f.write(" width=\"75\"\n") f.write(" height=\"125\">\n") background(f,fill) f.write("<g>\n") def footer(f): f.write("</g>\n") f.write("</svg>\n") def circle(f, y, style, stroke, fill): f.write("<circle cx=\"27\" cy=\"11\" r=\"16\"\n") f.write(" transform=\"translate(11," + str(y+11) + ")\"\n") if style == "none": f.write(" style=\"fill:#FFFFFF;stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") elif style == "gradient": f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") else: f.write(" style=\"fill:" + stroke + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") f.write("<circle cx=\"27\" cy=\"11\" r=\"8\"\n") f.write(" transform=\"translate(11," + str(y+11) + ")\"\n") f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") def check(f, y, style, stroke, fill): f.write("<path d=\"m 28.3575,70.160499 -5.861,5.861 -5.861,-5.866001 -4.102,-4.1 c -0.747,-0.747999 -1.212,-1.784999 -1.212,-2.93 0,-2.288998 1.854,-4.145998 4.146,-4.145998 1.143,0 2.18,0.465 2.93,1.214 l 4.099,4.101999 14.102,-14.102998 c 0.754,-0.749 1.787,-1.214 2.934,-1.214 2.289,0 
4.146,1.856001 4.146,4.145001 0,1.146 -0.467,2.18 -1.217,2.932 l -14.104,14.104997 z\"\n") f.write(" transform=\"translate(10," + str(y-40) + ")\"\n") if style == "none": f.write(" style=\"fill:#FFFFFF;stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") elif style == "gradient": f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") else: f.write(" style=\"fill:" + stroke + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") def cross(f, y, style, stroke, fill): f.write("<path d=\"m 33.3585,62.5035 10.102,10.1 c 0.752,0.75 1.217,1.783 1.217,2.932 0,2.287 -1.855,4.143 -4.146,4.143 -1.145,0 -2.178,-0.463 -2.932,-1.211 l -10.102,-10.103 -10.1,10.1 c -0.75,0.75 -1.787,1.211 -2.934,1.211 -2.284,0 -4.143,-1.854 -4.143,-4.141 0,-1.146 0.465,-2.184 1.212,-2.934 l 10.104,-10.102 -10.102,-10.1 c -0.747,-0.748 -1.212,-1.785 -1.212,-2.93 0,-2.289 1.854,-4.146 4.146,-4.146 1.143,0 2.18,0.465 2.93,1.214 l 10.099,10.102 10.102,-10.103 c 0.754,-0.749 1.787,-1.214 2.934,-1.214 2.289,0 4.146,1.856 4.146,4.145 0,1.146 -0.467,2.18 -1.217,2.932 l -10.104,10.105 z\"\n") f.write(" transform=\"translate(10," + str(y-40) + ")\"\n") if style == "none": f.write(" style=\"fill:#FFFFFF;stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") elif style == "gradient": f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") else: f.write(" style=\"fill:" + stroke + ";stroke:" + stroke + \ ";stroke-width:1.8;\" />\n") def check_card(f, n, style, stroke, fill): if n == 1: check(f, 41.5,style, stroke, fill) elif n == 2: check(f, 21.5,style, stroke, fill) check(f, 61.5,style, stroke, fill) else: check(f, 1.5,style, stroke, fill) check(f, 41.5,style, stroke, fill) check(f, 81.5,style, stroke, fill) def cross_card(f, n, style, stroke, fill): if n == 1: cross(f, 41.5,style, stroke, fill) elif n == 2: cross(f, 21.5,style, stroke, fill) cross(f, 61.5,style, stroke, fill) else: cross(f, 1.5,style, stroke, fill) cross(f, 41.5,style, stroke, fill) 
cross(f, 81.5,style, stroke, fill) def circle_card(f, n, style, stroke, fill): if n == 1: circle(f, 41.5,style, stroke, fill) elif n == 2: circle(f, 21.5,style, stroke, fill) circle(f, 61.5,style, stroke, fill) else: circle(f, 1.5,style, stroke, fill) circle(f, 41.5,style, stroke, fill) circle(f, 81.5,style, stroke, fill) def open_file(i): return file("images/card-"+str(i)+".svg", "w") def close_file(f): f.close() i = 0 for t in card_types: for c in color_pairs: for n in range(1,4): for s in fill_styles: i += 1 f = open_file(i) header(f,c[1]) if t == "O": circle_card(f,n,s,c[0],c[1]) elif t == "C": check_card(f,n,s,c[0],c[1]) else: cross_card(f,n,s,c[0],c[1]) footer(f) close_file(f)
[ "ignacio@sugarlabs.org" ]
ignacio@sugarlabs.org
ea08b87bbcf0c0cf423313d7133d5767e513cc77
e73f0bd1e15de5b8cb70f1d603ceedc18c42b39b
/adventOfCode/2018/25/25.py
361d1e18d2d39530b5332aaf11d459fb81790990
[]
no_license
thran/the_code
cbfa3b8be86c3b31f76f6fbd1deb2013d3326a4a
ba73317ddc42e10791a829cc6e1a3460cc601c44
refs/heads/master
2023-01-05T14:39:16.708461
2022-12-25T08:37:39
2022-12-25T08:37:39
160,978,160
2
0
null
null
null
null
UTF-8
Python
false
false
705
py
import queue import pandas as pd import numpy as np df = pd.read_csv('input.txt', header=None) distances = df.loc[:, :4] df['selected'] = False df['constellation'] = None value = 3 c = 0 while (~df.selected).sum() > 0: x = df.index[~df.selected][0] q = queue.Queue() q.put(x) c += 1 df.loc[x, 'selected'] = True df.loc[x, 'constellation'] = c while not q.empty(): i = q.get() d = df.loc[i] new = (np.abs(distances - d[:4]).sum(axis=1) <= 3) & ~df.selected df.loc[new, 'selected'] = True df.loc[new, 'constellation'] = c for n in df[new].index.values: q.put(n) print(df) print(len(df.constellation.unique()))
[ "thran@centrum.cz" ]
thran@centrum.cz
ae9068dfc07c4682726c4eb7283a242f767290cd
17ebca3c537e27bb1dff0d8f047ad63c1d7ebcf0
/Tests/goldman_02.22.2021.py
ddb9353ab5e51c092092c223b1b2d5b46d2d4272
[]
no_license
reading-stiener/For-the-love-of-algos
ad95283b55bfaf112302bf187fe0cefd6104f3f7
d6f0d0985ac124eeb5ad1caee8563d3d0f186a25
refs/heads/master
2023-03-07T10:51:31.851212
2021-02-23T05:27:08
2021-02-23T05:27:08
292,468,905
1
0
null
null
null
null
UTF-8
Python
false
false
1,121
py
def numOfIds(pool): count_eights = pool.count("8") count_rest = len(pool) - count_eights group_tens = count_rest // 10 if count_eights <= group_tens: return count_eights else: eights_less = 11 - count_rest % 10 if count_eights - group_tens >= eights_less: return group_tens + 1 + (count_eights - group_tens - eights_less) // 11 return group_tens def maxInversions1(arr): count = 0 n = len(arr) for i in range(n): for j in range(i+1, n): if arr[i] > arr[j]: for k in range(j+1, n): if arr[j] > arr[k]: count += 1 return count def maxInversions(arr): count = 0 n = len(arr) for i in range(1, n-1): lt_count, gt_count = 0, 0 for j in range(i+1, n): if arr[i] > arr[j]: lt_count += 1 for k in range(0, i): if arr[i] < arr[k]: gt_count += 1 count += lt_count * gt_count return count if __name__ == "__main__": print(maxInversions([5,3,4,2,1]))
[ "apradha1@conncoll.edu" ]
apradha1@conncoll.edu
d53bc612dcccd471686a70bf5243cfdd00224af9
1f41b828fb652795482cdeaac1a877e2f19c252a
/maya_menus/_MAINMENU_PMC_Rigging/28.Constraint/10.Tangent Constraint( curve, upObject, targets ).py
189af8a0a6847beda077c61b234044b4bb945a5c
[]
no_license
jonntd/mayadev-1
e315efe582ea433dcf18d7f1e900920f5590b293
f76aeecb592df766d05a4e10fa2c2496f0310ca4
refs/heads/master
2021-05-02T07:16:17.941007
2018-02-05T03:55:12
2018-02-05T03:55:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
208
py
from sgMaya import sgCmds import pymel.core sels = pymel.core.ls( sl=1 ) curve = sels[0] upObject = sels[1] targets = sels[2:] for target in targets: sgCmds.tangentConstraint( curve, upObject, target )
[ "kimsung9k@naver.com" ]
kimsung9k@naver.com
d1034770b6e251685d6bebfce77b1a4c9de647a3
d7ccb4225f623139995a7039f0981e89bf6365a4
/.history/mall/settings_20211012161345.py
b5f7915f5c98d58a5332fffcaf4ae14c3dbc5b92
[]
no_license
tonnymuchui/django-mall
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
55c083d8433be3c77adc61939cd197902de4ce76
refs/heads/master
2023-08-23T04:59:20.418732
2021-10-13T15:59:37
2021-10-13T15:59:37
415,668,388
1
0
null
null
null
null
UTF-8
Python
false
false
4,187
py
""" Django settings for mall project. Generated by 'django-admin startproject' using Django 3.2.5. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import django_heroku import dj_database_url from pathlib import Path import os # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent TEMPLATE_DIR = os.path.join(BASE_DIR,"templates") # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-#l0ij4e$3v@&xi3i#y$19f#_@z(yv+5yw$kc+02!-)g%ny%oi8' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'category', 'accounts', 'store', 'carts', 'whitenoise.runserver_nostatic', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mall.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [TEMPLATE_DIR,], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 
'django.contrib.messages.context_processors.messages', 'category.context_processors.menu_links', 'carts.context_processors.counter', ], }, }, ] WSGI_APPLICATION = 'mall.wsgi.application' AUTH_USER_MODEL = 'accounts.Account' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env) # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ # STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') STATIC_URL = '/static/' # STATIC_ROOT = BASE_DIR / 'staticfiles' # STATIC_ROOT = BASE_DIR /'static' STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), ] # media files configuration MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR /'media' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # WHITENOISE_USE_FINDERS = True # STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' # Activate Django-Heroku. # django_heroku.settings(locals())
[ "tonykanyingah@gmail.com" ]
tonykanyingah@gmail.com
6cc0eea8dd978ecbdfed41def4734537c2ada346
4bcb9dd0701dacd001cace60e0d6543c86b4ef4b
/PostYourFoodAd/migrations/0003_auto_20170422_1232.py
709fc44f49897fcf4f4e28ae44ad572807f80baf
[]
no_license
kbidve/Hostelied_commit
8576d317060021fec68cbcf5051b2bc32e461aba
2d439ab979dcb38c9cfa156799d194ab50f964bf
refs/heads/master
2021-01-22T20:34:52.827418
2017-05-19T14:46:45
2017-05-19T14:46:45
85,331,991
0
0
null
null
null
null
UTF-8
Python
false
false
2,071
py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-04-22 12:32 from __future__ import unicode_literals import django.contrib.gis.db.models.fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('UserAdministrator', '0002_auto_20170328_2311'), ('PostYourFoodAd', '0002_auto_20170422_0828'), ] operations = [ migrations.CreateModel( name='Thali_Details', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('vegetable', models.IntegerField(default=1)), ('roti', models.IntegerField(default=1)), ('dal', models.IntegerField(default=1)), ('rice', models.IntegerField(default=1)), ('salad', models.CharField(default='some string', max_length=200)), ], ), migrations.AddField( model_name='mess_info', name='address', field=models.CharField(default='some string', max_length=200), ), migrations.AddField( model_name='mess_info', name='description', field=models.CharField(default='some string', max_length=500), ), migrations.AddField( model_name='mess_info', name='location', field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326), ), migrations.AddField( model_name='mess_info', name='user_id', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='mess', to='UserAdministrator.UserInfo'), ), migrations.AddField( model_name='thali_details', name='thali_details', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='thali_details', to='PostYourFoodAd.Mess_Info'), ), ]
[ "root@localhost.localdomain" ]
root@localhost.localdomain
fc3413eab8a3777a066cf659f7a1f321ad1121b6
cb7c3673ad937c282a39be74d0aee8628e75928d
/tests/test_scripts/test_gen_jsonld.py
d38e0436a0414fd74dea9cbb7fa269db34e04ed9
[ "CC0-1.0" ]
permissive
bpow/linkml
649d6d48f39a8c51efa92fba7eb25c1d8854b472
ab83c0caee9c02457ea5a748e284dee6b547fcd6
refs/heads/main
2023-05-05T18:46:04.501897
2021-05-13T21:17:03
2021-05-13T21:17:03
371,163,928
0
0
CC0-1.0
2021-05-26T20:42:13
2021-05-26T20:42:12
null
UTF-8
Python
false
false
4,620
py
import os import re import unittest # This has to occur post ClickTestCase from functools import reduce from typing import List, Tuple import click from rdflib import Graph, URIRef from linkml import METAMODEL_NAMESPACE from linkml.generators.jsonldcontextgen import ContextGenerator from linkml.generators import jsonldgen from tests.test_scripts.environment import env from tests.utils.clicktestcase import ClickTestCase cwd = os.path.dirname(__file__) meta_context = 'file:./output/gencontext/meta.jsonld' repl: List[Tuple[str, str]] = [ (r'"source_file_size": [0-9]+', ''), (r'"source_file_date": "[^"]+"', ''), (r'"generation_date": "[^"]+"', ''), (r'"source_file": "[^"]+"', '') ] def filtr(txt: str) -> str: return reduce(lambda s, expr: re.sub(expr[0], expr[1], s), repl, txt) class GenJSONLDTestCase(ClickTestCase): testdir = "genjsonld" click_ep = jsonldgen.cli prog_name = "gen-jsonld" env = env def test_help(self): self.do_test("--help", 'help') def test_meta(self): self.temp_file_path('meta.jsonld') self.do_test(f"--context {meta_context}", 'meta.jsonld', filtr=filtr) self.do_test(f'-f jsonld --context {meta_context}', 'meta.jsonld', filtr=filtr) self.do_test(f'-f xsv --context {meta_context}', 'meta_error', expected_error=click.exceptions.BadParameter) def check_size(self, g: Graph, g2: Graph, root: URIRef, expected_classes: int, expected_slots: int, expected_types: int, expected_subsets: int, expected_enums: int, model: str) -> None: """ Check :param g: :param g2: :param root: :param expected_classes: :param expected_slots: :param expected_types: :param expected_subsets: :param expected_enums: :param model: :return: """ for graph in [g, g2]: n_classes = len(list(graph.objects(root, METAMODEL_NAMESPACE.classes))) n_slots = len(list(graph.objects(root, METAMODEL_NAMESPACE.slots))) n_types = len(list(graph.objects(root, METAMODEL_NAMESPACE.types))) n_subsets = len(list(graph.objects(root, METAMODEL_NAMESPACE.subsets))) n_enums = len(list(graph.objects(root, 
METAMODEL_NAMESPACE.enums))) self.assertEqual(expected_classes, n_classes, f"Expected {expected_classes} classes in {model}") self.assertEqual(expected_slots, n_slots, f"Expected {expected_slots} slots in {model}") self.assertEqual(expected_types, n_types, f"Expected {expected_types} types in {model}") self.assertEqual(expected_subsets, n_subsets, f"Expected {expected_subsets} subsets in {model}") self.assertEqual(expected_enums, n_enums, f"Expected {expected_enums} enums in {model}") def test_meta_output(self): """ Generate a context AND a jsonld for the metamodel and make sure it parses as RDF """ tmp_jsonld_path = self.temp_file_path('metajson.jsonld') tmp_rdf_path = self.temp_file_path('metardf.ttl') tmp_meta_context_path = self.temp_file_path('metacontext.jsonld') # Generate an image of the metamodel gen = ContextGenerator(env.meta_yaml, importmap=env.import_map) base = gen.namespaces[gen.schema.default_prefix] if str(base)[-1] not in '/#': base += '/' schema = base + "meta" # Generate context with open(tmp_meta_context_path, 'w') as tfile: tfile.write(gen.serialize()) # Generate JSON with open(tmp_jsonld_path, 'w') as tfile: tfile.write(jsonldgen.JSONLDGenerator(env.meta_yaml, fmt=jsonldgen.JSONLDGenerator.valid_formats[0], importmap=env.import_map).serialize(context=tmp_meta_context_path)) # Convert JSON to TTL g = Graph() g.load(tmp_jsonld_path, format="json-ld") g.serialize(tmp_rdf_path, format="ttl") g.bind('meta', METAMODEL_NAMESPACE) new_ttl = g.serialize(format="turtle").decode() # Make sure that the generated TTL matches the JSON-LD (probably not really needed, as this is more of a test # of rdflib than our tooling but it doesn't hurt new_g = Graph() new_g.parse(data=new_ttl, format="turtle") # Make sure that both match the expected size (classes, slots, types, and model name for error reporting) self.check_size(g, new_g, URIRef(schema), 17, 122, 14, 1, 1, "meta") if __name__ == '__main__': unittest.main()
[ "solbrig@jhu.edu" ]
solbrig@jhu.edu
a063a23ba0ef1a9b820232f028f24b1b463d0067
a6fd4108016deab24ed5e4d528a4c009b597bf01
/info_manager/migrations/0005_auto_20190927_1057.py
db9f59b6a8145a322760a05e004c10fbf5330d38
[ "Apache-2.0" ]
permissive
UniversitaDellaCalabria/info-manager
2da8f4087ed180c05a710a7d17dff7e71814dac9
f4be641467f65d591ec28bbdf6ed681cdfed0e80
refs/heads/master
2022-12-04T17:56:56.349256
2022-01-05T22:50:12
2022-01-05T22:50:12
198,625,639
1
0
Apache-2.0
2022-11-22T09:47:21
2019-07-24T11:51:11
Python
UTF-8
Python
false
false
648
py
# Generated by Django 2.2.2 on 2019-09-27 08:57 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('info_manager', '0004_itemtranslation'), ] operations = [ migrations.AddField( model_name='item', name='created', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='item', name='modified', field=models.DateTimeField(auto_now=True), ), ]
[ "giuseppe.demarco@unical.it" ]
giuseppe.demarco@unical.it
a9947e37c78fdb42ea17249f91dbf7f64fc41328
a708f1d36586d2b01c99f2cb44aa4612b10192f6
/bfs/733图像渲染.py
7dabfbbb5f104bfb03e623af54d96f324c7e0529
[]
no_license
LeopoldACC/Algorithm
2477e8a371e9cdc5a47b582ca2a454539b96071e
fc1b0bec0e28d31e9a6ff722b3a66eacb0278148
refs/heads/master
2023-01-25T02:28:14.422447
2020-12-03T15:01:10
2020-12-03T15:01:10
197,297,197
2
0
null
null
null
null
UTF-8
Python
false
false
662
py
class Solution: def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]: if not image: return image dirs = [(-1,0),(1,0),(0,-1),(0,1)] q = collections.deque([(sr,sc)]) ori = image[sr][sc] m,n = len(image),len(image[0]) visit = set() while q: x,y = q.popleft() image[x][y] = newColor visit.add((x,y)) for dx,dy in dirs: nx,ny = x+dx,y+dy if 0<=nx<m and 0<=ny<n and image[nx][ny] == ori and (nx,ny) not in visit: q.append((nx,ny)) return image
[ "zhenggong9831@gmail.com" ]
zhenggong9831@gmail.com
99fc9c9e5797d077f8302e1cc617697317de6463
e23778e9cbedd6a9fcd28eb4263dab33da4a2d5a
/clowningaround/urls.py
80d331a8cd46688fc9c8832a1a1843527b1e6924
[]
no_license
eustone/clowingaround
92667e2580c80dddf4489a1724d44e8c9823e215
1723d9aa5645510f711f0ab6478174b0aa66d84e
refs/heads/master
2022-12-09T02:35:24.270583
2020-04-03T07:08:38
2020-04-03T07:08:38
252,653,240
0
0
null
2022-12-08T03:58:11
2020-04-03T06:44:31
Python
UTF-8
Python
false
false
1,065
py
"""clowningaround URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import include,path urlpatterns = [ path('admin/', admin.site.urls), path('accounts/', include('allauth.urls')), path('api-auth/', include('rest_framework.urls', namespace='rest_framework')), path('api/v1/appointments/', include('appointments.urls', namespace='appointments')), path('auth/', include('djoser.urls')), path('', include('users.urls')), ]
[ "emalaya@gmail.com" ]
emalaya@gmail.com
fc54124378398c17d473cfeb673ecf5b28789142
cdf4e1b9db499766780d28762f6c71ac2e438a90
/getting_file_dates.py
fff141b456cacfb572b9bc589156326889adbecf
[]
no_license
sa-i/20200414py3interm
cd33af18bbb6b4ac98308905244cf71b014666e9
6bcb0a871f3894f4eb1a53b792ea5cc849d302e7
refs/heads/master
2022-05-09T08:52:11.242286
2020-04-17T21:57:45
2020-04-17T21:57:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
301
py
#!/usr/bin/env python import os from datetime import datetime for file_name in os.listdir(): if file_name.endswith('.py'): raw_file_timestamp = os.path.getmtime(file_name) file_timestamp = datetime.fromtimestamp(raw_file_timestamp) print(file_timestamp.date(), file_name)
[ "jstrickler@gmail.com" ]
jstrickler@gmail.com
b8e48ca6481e0eb739d7e2645271b4d774c19b9a
27c04f9daf823c851bef35f91d261c677295d00c
/backend/asgi.py
185382269e27551e31283442e62bb9840e7e8111
[]
no_license
wlgud0402/crawling_estate_stock_backend
865e6ed8811892c0823a8ac2cd11dba45dcdca2e
dfe2e06ccc3e993fd8cf9fe641d1aed097e375ad
refs/heads/master
2023-04-29T16:13:03.224377
2021-05-25T15:48:57
2021-05-25T15:48:57
349,053,194
0
0
null
null
null
null
UTF-8
Python
false
false
167
py
import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings') application = get_asgi_application()
[ "wlgudrlgus@naver.com" ]
wlgudrlgus@naver.com
dac54cc515d226209428a3bc5cc34eb7813b2419
8eab8ab725c2132bb8d090cdb2d23a5f71945249
/virt/Lib/site-packages/jedi/third_party/typeshed/third_party/2/tornado/httpclient.pyi
16bdb2856786d54de4bf56a7c7a95877725004ff
[ "MIT", "Apache-2.0" ]
permissive
JoaoSevergnini/metalpy
6c88a413a82bc25edd9308b8490a76fae8dd76ca
c2d0098a309b6ce8c756ff840bfb53fb291747b6
refs/heads/main
2023-04-18T17:25:26.474485
2022-09-18T20:44:45
2022-09-18T20:44:45
474,773,752
3
1
MIT
2022-11-03T20:07:50
2022-03-27T22:21:01
Python
UTF-8
Python
false
false
3,219
pyi
from typing import Any from tornado.util import Configurable class HTTPClient: def __init__(self, async_client_class=..., **kwargs) -> None: ... def __del__(self): ... def close(self): ... def fetch(self, request, **kwargs): ... class AsyncHTTPClient(Configurable): @classmethod def configurable_base(cls): ... @classmethod def configurable_default(cls): ... def __new__(cls, io_loop=..., force_instance=..., **kwargs): ... io_loop: Any defaults: Any def initialize(self, io_loop, defaults=...): ... def close(self): ... def fetch(self, request, callback=..., raise_error=..., **kwargs): ... def fetch_impl(self, request, callback): ... @classmethod def configure(cls, impl, **kwargs): ... class HTTPRequest: proxy_host: Any proxy_port: Any proxy_username: Any proxy_password: Any url: Any method: Any body_producer: Any auth_username: Any auth_password: Any auth_mode: Any connect_timeout: Any request_timeout: Any follow_redirects: Any max_redirects: Any user_agent: Any decompress_response: Any network_interface: Any streaming_callback: Any header_callback: Any prepare_curl_callback: Any allow_nonstandard_methods: Any validate_cert: Any ca_certs: Any allow_ipv6: Any client_key: Any client_cert: Any ssl_options: Any expect_100_continue: Any start_time: Any def __init__( self, url, method=..., headers=..., body=..., auth_username=..., auth_password=..., auth_mode=..., connect_timeout=..., request_timeout=..., if_modified_since=..., follow_redirects=..., max_redirects=..., user_agent=..., use_gzip=..., network_interface=..., streaming_callback=..., header_callback=..., prepare_curl_callback=..., proxy_host=..., proxy_port=..., proxy_username=..., proxy_password=..., allow_nonstandard_methods=..., validate_cert=..., ca_certs=..., allow_ipv6=..., client_key=..., client_cert=..., body_producer=..., expect_100_continue=..., decompress_response=..., ssl_options=..., ) -> None: ... @property def headers(self): ... @headers.setter def headers(self, value): ... 
@property def body(self): ... @body.setter def body(self, value): ... class HTTPResponse: request: Any code: Any reason: Any headers: Any buffer: Any effective_url: Any error: Any request_time: Any time_info: Any def __init__( self, request, code, headers=..., buffer=..., effective_url=..., error=..., request_time=..., time_info=..., reason=... ) -> None: ... body: bytes def rethrow(self): ... class HTTPError(Exception): code: Any response: Any def __init__(self, code, message=..., response=...) -> None: ... class _RequestProxy: request: Any defaults: Any def __init__(self, request, defaults) -> None: ... def __getattr__(self, name): ... def main(): ...
[ "joao.a.severgnini@gmail.com" ]
joao.a.severgnini@gmail.com
162669aaa9caa5ed418da539dc6c63763eae311f
61361b4338c7434ae5fcfb225ef327c0e5bc7b5e
/etsydb/etsy/migrations/0003_auto_20151206_0002.py
b89556d6baba547e97cf83f04c93ade61426a01d
[]
no_license
mdamien/scrapy-etsy
a253284f6bcfc1cd04f551ddfb6d0cdb9bf0a9c3
3c379b4dafa181f11007c2e6902bb2689140a335
refs/heads/master
2021-01-10T07:31:03.894077
2016-01-23T22:32:14
2016-01-23T22:32:14
47,045,325
1
1
null
null
null
null
UTF-8
Python
false
false
608
py
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2015-12-06 00:02 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('etsy', '0002_auto_20151206_0001'), ] operations = [ migrations.AlterField( model_name='product', name='rating_count', field=models.IntegerField(null=True), ), migrations.AlterField( model_name='product', name='rating_score', field=models.FloatField(null=True), ), ]
[ "damien@dam.io" ]
damien@dam.io
803f20b222f2a786b77c9b6d52e761c321aa2191
5ea136ca2e8066b77b39afdf15e3d0e6bc74761f
/scripts/generate-solve-files.py
dd2e19d28cb4d8b659ccf6ea89552ca96714641b
[]
no_license
reneang17/ttbar
4023421846a70c22c13a2962520f7723ad35636b
75f4fff1b5d79af097ea04aab437e2963215a232
refs/heads/master
2020-06-12T15:07:11.918815
2019-06-28T22:24:07
2019-06-28T22:24:07
194,339,531
0
0
null
null
null
null
UTF-8
Python
false
false
4,010
py
#!/usr/bin/env python3 # # todo: # import argparse import os import re import pickle import stat import pyIdSolver parser = argparse.ArgumentParser() parser.add_argument("file", \ help="list of prototypes to be solved") parser.add_argument("--num", "-n", action="store_true",\ help="reduce with numerical values") args = parser.parse_args() #------------------------------------------------------------------------------- # Generate batch scripts to be run on cluster #------------------------------------------------------------------------------- def generate_batch_files(proto_list, parstr): proto_re = re.compile('PR') proto_re = re.compile('PR(\d+)') # iterate over integral files (hence effectively the integrals to solve) for i in range(0,len(proto_list)): PRname = "PR{0}".format(proto_list[i]) fname = "batch-integral{0}.sh".format(i) # determine list of required databases matching_file_list = [] matchingsfile = "{0}matchings".format(PRname) db_numbers = [] with open(matchingsfile) as fh: tmpstr = fh.read() db_numbers = proto_re.findall(tmpstr) # content content = "#\n" content += "INTEGRALFILE=tmpintegral{0}\n".format(i) content += "OUTFILE=out{0}\n".format(i) content += "STDOUTFILE=stdout{0}\n".format(i) content += "\n" content += "DIR=/afs/cern.ch/user/s/sapeta/workspace/work/ttbar/idsolver/nnlo\n" content += "\n" content += "cp $DIR/solve_integrals .\n" content += "cp $DIR/$INTEGRALFILE .\n" for dbnum in db_numbers: content += "cp $DIR/idPR{0}.dat .\n".format(dbnum) content += "cp $DIR/PR{0}inc.dat .\n".format(dbnum) content += "\n" content += "form() {\n" content += " /afs/cern.ch/user/s/sapeta/local/bin/form \"$@\"\n" content += " }\n" content += "export -f form\n" content += "\n" content += "time $DIR/solve_integrals $INTEGRALFILE $OUTFILE {0} > $STDOUTFILE\n".\ format(parstr) content += "\n" content += "cp $INTEGRALFILE $DIR/\n" content += "cp $OUTFILE $DIR/\n" content += "cp $STDOUTFILE $DIR/\n" with open(fname, "w") as batchfile: batchfile.write(content) 
os.chmod(fname, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- def split_integrals_into_sublists(file): integrals = [] prototype_numbers = [] proto_re = re.compile('PR(\d+)') with open(file) as fh: integrals = fh.read().split() #sublists = \ #[integrals[x:x+args.npack] for x in range(0,len(integrals),args.npack)] for i in range(0,len(integrals)): fname = "tmpintegral{0}".format(i) print(fname, integrals[i]) with open(fname, "w") as integral_file: n = int(proto_re.findall(integrals[i])[0]) prototype_numbers.append(n) integral_file.write(integrals[i]+"\n") return prototype_numbers #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # MAIN PART OF THE SCRIPT #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- if __name__ == '__main__': parstr = "ep ap b c qT2" if args.num: parstr = "ep=1/2749 ap=1/3089 b=1/1453 c=1/2857 qT2=1/3557" #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- proto_list = split_integrals_into_sublists(args.file) #----------------------------------------------------------------------------- # generate batch run files #----------------------------------------------------------------------------- generate_batch_files(proto_list,parstr)
[ "reneang17@gmail.com" ]
reneang17@gmail.com