blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb6afdf9dbb7cd9d0a6633a6e2296e70e406b1c3
|
bf63f844c9d3db9ae0293bc6762be53a6ca450b2
|
/helusers/jwt.py
|
735acf51caf9a653141543574b9fd416610362be
|
[
"BSD-2-Clause"
] |
permissive
|
tuomas777/django-helusers
|
93ab292b3b7a884b8ba04f9b24452ee3cc8342a7
|
77252693770410e40191f775462181cc7a3ec2bd
|
refs/heads/master
| 2020-04-10T05:30:45.465682
| 2018-09-14T09:43:06
| 2018-09-14T09:43:06
| 160,829,703
| 0
| 0
|
BSD-2-Clause
| 2018-12-07T13:50:08
| 2018-12-07T13:50:07
| null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from django.conf import settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from .user_utils import get_or_create_user
def patch_jwt_settings():
    """Point rest_framework_jwt at this module's payload handler and, when
    allauth's 'helsinki' social app is configured, copy its credentials
    into the JWT defaults.
    """
    jwt_defaults = api_settings.defaults
    handler_path = __name__ + '.get_user_id_from_payload_handler'
    jwt_defaults['JWT_PAYLOAD_GET_USER_ID_HANDLER'] = handler_path
    if 'allauth.socialaccount' in settings.INSTALLED_APPS:
        from allauth.socialaccount.models import SocialApp
        try:
            social_app = SocialApp.objects.get(provider='helsinki')
        except SocialApp.DoesNotExist:
            pass
        else:
            jwt_defaults['JWT_SECRET_KEY'] = social_app.secret
            jwt_defaults['JWT_AUDIENCE'] = social_app.client_id
# Disable automatic settings patching for now because it breaks Travis.
# patch_jwt_settings()
class JWTAuthentication(JSONWebTokenAuthentication):
    """JWT authentication that auto-creates users from the token payload."""
    def authenticate_credentials(self, payload):
        # Delegate user resolution to the shared helper, which creates the
        # user on first login instead of rejecting unknown JWT subjects.
        return get_or_create_user(payload)
def get_user_id_from_payload_handler(payload):
    """Return the JWT 'sub' (subject) claim, or None when it is absent."""
    subject = payload.get('sub')
    return subject
|
[
"juha.yrjola@iki.fi"
] |
juha.yrjola@iki.fi
|
0315d6b622ee9399845ac3c750df71dabf3c92b2
|
2967a6fa8065ecb68683b0499f66f65b9ab646c1
|
/Wbudowane_struktury_danych/9_named_tuple/zadanie/main.py
|
802648494ba1c52c2fcb15f4d95f5733ca93eeac
|
[] |
no_license
|
keinam53/Python_Poczatek
|
f285836a4aa7d261f25bcc4add253e894c30e65e
|
ccb05f3918cc94c925055c78627cba28482ce5bb
|
refs/heads/master
| 2023-05-01T06:41:56.414754
| 2021-05-13T19:30:30
| 2021-05-13T19:30:30
| 363,479,245
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from collections import namedtuple

# Simple immutable record describing an apple.
Apple = namedtuple("Apple", ["species_name", "size", "price"])


def run():
    """Demonstrate attribute access, index access and iteration on a namedtuple."""
    apple = Apple("Gala", "M", 2.5)
    # Access by field name.
    print(apple.species_name)
    print(apple.size)
    print(apple.price)
    # The same values are reachable by position.
    for position in range(3):
        print(apple[position])
    # A namedtuple is iterable like any tuple.
    for field_value in apple:
        print(field_value)


if __name__ == '__main__':
    run()
|
[
"mariusz.baran536@gmail.com"
] |
mariusz.baran536@gmail.com
|
10c9ca0234965420c1d7890a0676eac38518ad78
|
d40fbefbd5db39f1c3fb97f17ed54cb7b6f230e0
|
/ibm_db2/tests/test_unit.py
|
b9afacce63cced4dd32e2536a2f191f692792ab6
|
[] |
permissive
|
slightilusion/integrations-core
|
47a170d791e809f3a69c34e2426436a6c944c322
|
8f89e7ba35e6d27c9c1b36b9784b7454d845ba01
|
refs/heads/master
| 2020-05-20T18:34:41.716618
| 2019-05-08T21:51:17
| 2019-05-08T21:51:17
| 185,708,851
| 2
| 0
|
BSD-3-Clause
| 2019-05-09T02:05:19
| 2019-05-09T02:05:18
| null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.ibm_db2.utils import scrub_connection_string
pytestmark = pytest.mark.unit
class TestPasswordScrubber:
    """Verify scrub_connection_string masks pwd=... values in connection strings."""

    def test_start(self):
        # Password at the beginning of the string.
        assert scrub_connection_string('pwd=password;...') == 'pwd=********;...'

    def test_end(self):
        # Password at the end of the string.
        assert scrub_connection_string('...;pwd=password') == '...;pwd=********'

    def test_no_match_within_value(self):
        # 'pwd=' embedded inside another token must be left untouched.
        unchanged = '...pwd=password;...'
        assert scrub_connection_string(unchanged) == unchanged
|
[
"noreply@github.com"
] |
slightilusion.noreply@github.com
|
5f697b884de06a2a759dbaf8eff4e587d4b61385
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_8302.py
|
ada636326405c8ccbeb036a5d50f5d02b405d7d7
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,331
|
py
|
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # Older Chimera builds: create marker sets through the volume path dialog.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets = {}
surf_sets = {}
# Every marker uses the same radius.
_MARKER_RADIUS = 21.9005
# (set name, (x, y, z) position, RGB colour) for each marker placed below.
# This table replaces 21 copy-pasted 5-line stanzas with one loop; colours
# are shared per Cog subunit.
_MARKERS = [
    ("Cog1_Anch", (440, 791, 782), (0, 0, 1)),
    ("Cog2_GFPN", (225, 173, 486), (1, 0.5, 0)),
    ("Cog2_GFPC", (189, 410, 398), (1, 0.5, 0)),
    ("Cog2_Anch", (223, 777, 905), (1, 0.5, 0)),
    ("Cog3_GFPN", (478, 789, 82), (1, 0.87, 0)),
    ("Cog3_GFPC", (333, 343, 863), (1, 0.87, 0)),
    ("Cog3_Anch", (31, 931, 648), (1, 0.87, 0)),
    ("Cog4_GFPN", (870, 666, 358), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (808, 521, 926), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (930, 997, 266), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (361, 198, 927), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (324, 359, 244), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (138, 760, 307), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (105, 308, 227), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (981, 431, 237), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (90, 179, 506), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (62, 80, 966), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (941, 518, 40), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (148, 603, 718), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (755, 407, 323), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (819, 809, 563), (0.3, 0.69, 0.29)),
]
for name, position, color in _MARKERS:
    # Same create-if-missing pattern the original used per stanza; `s` and
    # `mark` stay bound after the loop, as in the original script.
    if name not in marker_sets:
        marker_sets[name] = new_marker_set(name)
    s = marker_sets[name]
    mark = s.place_marker(position, color, _MARKER_RADIUS)
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
641a6e7fb1755fe782bd4cfa3de6704b19fd36e6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_09_09_11_54_52_542373.py
|
b6266cc9513e5cad35ebf1d45a0ba2b74ee8442f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
import math


def calcula_gaussiana(x, mi, sigma):
    """Return the value of the Gaussian (normal) PDF at x.

    Args:
        x: point at which to evaluate the density.
        mi: mean (mu) of the distribution.
        sigma: standard deviation; must be non-zero.

    Returns:
        float: (1 / (sigma * sqrt(2*pi))) * exp(-0.5 * ((x - mi) / sigma)**2)
    """
    # The original expression had unbalanced parentheses (SyntaxError),
    # called sigma as if it were a function, and referenced the undefined
    # names `e` and `z`. Rewritten as the standard normal density formula.
    coeficiente = 1 / (sigma * math.sqrt(2 * math.pi))
    expoente = -0.5 * ((x - mi) / sigma) ** 2
    return coeficiente * math.exp(expoente)
|
[
"you@example.com"
] |
you@example.com
|
24f9ced0b5dd1c194216331452babe0cbcb1b786
|
52e05d43e6c37ee7a586118cf0f390b04e92ada3
|
/76. Minimum Window Substring _ Hash Table.py
|
81639a8ff54abd07d4ebc6079e31945e94369bcd
|
[] |
no_license
|
CaizhiXu/LeetCode-Python-Solutions
|
8f7a856e11e0804f32c43ed98bc08525a950ac13
|
63120dbaabd7c3c19633ebe952bcee4cf826b0e0
|
refs/heads/master
| 2021-05-18T04:57:16.412834
| 2020-08-05T04:33:13
| 2020-08-05T04:33:13
| 251,121,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Return the smallest substring of s containing every character of t
        (with multiplicity), or '' when no such window exists."""
        ans = float('inf')
        minstr = ''
        sourcehash = [0] * 256
        targethash = [0] * 256
        self.init_target_hash(targethash, t)
        j = 0
        for i in range(len(s)):
            # Grow the window rightward until it covers t (or s is exhausted).
            while not self.valid(sourcehash, targethash) and j < len(s):
                # BUG FIX: was `sourcehash[ord[j]]` — ord is a function and
                # must be applied to the character s[j], not subscripted.
                sourcehash[ord(s[j])] += 1
                j += 1
            if self.valid(sourcehash, targethash):
                if ans > j - i:
                    ans = j - i
                    minstr = s[i:j]
            # Drop s[i] before sliding the left edge forward.
            # BUG FIX: was `sourcehash[ord[i]]`.
            sourcehash[ord(s[i])] -= 1
        return minstr

    def init_target_hash(self, targethash, t):
        """Count each character of t into the 256-slot frequency list."""
        # BUG FIX: lists have no .get(); increment the slot directly.
        for ch in t:
            targethash[ord(ch)] += 1

    def valid(self, sourcehash, targethash):  # does the window cover the target counts?
        for i in range(256):
            if targethash[i] > sourcehash[i]:
                return False
        return True
## time, space - O(N)
from collections import Counter, defaultdict


class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Sliding-window minimum covering substring; O(len(s) + len(t))."""
        need = Counter(t)
        have = defaultdict(int)
        satisfied = 0           # distinct chars whose required count is met
        best = ''
        best_len = float('inf')
        left = 0
        for right, ch in enumerate(s):
            have[ch] += 1
            if ch in need and have[ch] == need[ch]:
                satisfied += 1
            # Shrink from the left while the window still covers t.
            while satisfied == len(need):
                window = right - left + 1
                if window < best_len:
                    best_len = window
                    best = s[left:right + 1]
                dropped = s[left]
                have[dropped] -= 1
                if dropped in need and have[dropped] < need[dropped]:
                    satisfied -= 1
                left += 1
        return best
|
[
"xucaizhi@gmail.com"
] |
xucaizhi@gmail.com
|
ce17cfce8c468bf026821bed8c4ba90149ca668a
|
0a3cbf51778ed922c8466af0484e588aa3246c10
|
/main/views.py
|
4167fe0f29133e925a23434a25ad87558c61aab2
|
[] |
no_license
|
minahosam/e-shopper
|
075f1917984424f3eb9eea56ed1cf49bbfaa0789
|
2c601e888bd62b803ab2fe6f59607dacb9868f3e
|
refs/heads/main
| 2023-06-21T17:15:08.454089
| 2021-08-07T18:45:48
| 2021-08-07T18:45:48
| 372,903,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,135
|
py
|
from django.shortcuts import render,redirect
from .models import *
from .forms import *
from Profile.models import *
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
import json
import datetime
from .utils import *
# Create your views here.
def show_all_product(request):
    """Landing page: all brands/categories plus the six newest products."""
    all_brand=brand.objects.all()
    all_category=category.objects.all()
    # newest six products (highest ids first)
    all_products=index.objects.all().order_by('-id')[:6]
    if request.user.is_authenticated:
        # NOTE(review): both branches call cookieCart(); the authenticated
        # path presumably should use dataCart() as cart()/checkout() do —
        # confirm before changing.
        cookieContent=cookieCart(request)
        item=cookieContent['items']
        net_total=cookieContent['net']
        total=cookieContent['total']
    else:
        cookieContent=cookieCart(request)
        item=cookieContent['items']
        net_total=0
        total=cookieContent['total']
    # item/net_total are computed but only 'total' reaches the template
    return render(request,'main/index.html',{'all_brand':all_brand ,'all_category':all_category,'products':all_products,'total':total})
def product_detail(request,slug):
    """Product page for the product with the given slug.

    On POST, saves a submitted review form; on GET, shows a blank form.
    """
    all_products=index.objects.get(slug=slug)
    all_brand=brand.objects.all()
    all_category=category.objects.all()
    # NOTE(review): this fetches every review, not only this product's —
    # confirm that is intended.
    all_reviews=review.objects.all()
    if request.method=='POST':
        form=reviewForm(request.POST,request.FILES)
        if form.is_valid():
            form.save()
    else:
        form=reviewForm()
    return render(request,'main/product_details.html',{'products':all_products,'all_brand':all_brand ,'all_category':all_category,'form':form,'revi':all_reviews})
def wish_list(request,slug):
    """Toggle the current user's membership in a product's wishlist, then
    send them back to the product listing."""
    product = index.objects.get(slug=slug)
    wishers = product.add_to_wishlist
    if request.user in wishers.all():
        wishers.remove(request.user)
    else:
        wishers.add(request.user)
    return redirect('main:show')
def wishlist_page(request):
    """Render every product the current user has wishlisted."""
    wished_products = index.objects.filter(add_to_wishlist=request.user)
    return render(request,'main/wish_page.html',{'wished': wished_products})
def search_by_category(request):
    """List the products belonging to the category named in the querystring."""
    cat=request.GET['category']
    # debug output left in by the author
    print (cat)
    all_brand=brand.objects.all()
    all_category=category.objects.all()
    name_of_category=category.objects.get(category_name=cat)
    categore_selected_result=index.objects.filter(product_category=name_of_category.id)
    return render(request,'main/search_category.html',{'category':categore_selected_result,'all_brand':all_brand ,'all_category':all_category})
def search_by_brand(request):
    """List the products belonging to the brand named in the querystring."""
    brand_=request.GET['brand']
    # debug output left in by the author
    print(brand_)
    all_brand=brand.objects.all()
    all_category=category.objects.all()
    brand_name=brand.objects.get(brand_name = brand_)
    brand_selected_result=index.objects.filter(product_brand=brand_name.id)
    return render(request,'main/search_brand.html',{'brand':brand_selected_result,'all_brand':all_brand ,'all_category':all_category})
def cart(request):
    """Cart page: DB-backed cart for logged-in users, cookie cart otherwise."""
    if request.user.is_authenticated:
        cartData=dataCart(request)
        item=cartData['items']
        net_total=cartData['net']
        total=cartData['total']
    else:
        cookieContent=cookieCart(request)
        item=cookieContent['items']
        net_total=cookieContent['net']
        total=cookieContent['total']
    return render(request,'main/cart.html',{'items':item ,'net':net_total,'total':total})
def checkout(request):
    """Checkout page with cart contents, shipping info and country/state lists."""
    if request.user.is_authenticated:
        cartData=dataCart(request)
        item=cartData['items']
        net_total=cartData['net']
        total=cartData['total']
        coutries=cartData['countries']
        states=cartData['state']
        shipping_info=shippinginfo.objects.all()
        # 'shiped' flags whether any cart item belongs to a still-open order
        shiped=False
        for i in item:
            if i.item_order.order_completed ==False:
                shiped = True
    else:
        # anonymous users: cart comes from the cookie, geo data from the DB
        cookieContent=cookieCart(request)
        item=cookieContent['items']
        net_total=cookieContent['net']
        total=cookieContent['total']
        shipping_info=shippinginfo.objects.all()
        coutries=country.objects.all()
        shiped=False
        states=state.objects.filter(state_country__in=coutries)
    return render(request,'main/checkout.html',{'items':item , 'net':net_total,'total':total,'countries':coutries,'state':states,'shiped':shiped,'info':shipping_info})
def update_cart(request):
    """Ajax endpoint adjusting one product's quantity in the user's open order.

    Body: JSON {'produactId': <product id>, 'action': 'add' | 'add2' |
    'delete' | <anything else decrements>}.
    """
    additon=json.loads(request.body)
    product=additon['produactId']
    product_action=additon['action']
    print(product_action,product)
    selected_product=index.objects.get(id=product)
    order_owner=request.user
    # NOTE(review): 'usrename' disagrees with 'username' used by
    # process_order below — one of the two must be misspelled relative to
    # the userprofile model; confirm against models.py before fixing.
    requested_user=userprofile.objects.get(usrename=order_owner)
    Order , create=order.objects.get_or_create(order_customer=requested_user,order_completed=False)
    item , create=orderItem.objects.get_or_create(item_name=selected_product,item_order=Order)
    if product_action == 'add':
        # 'add' leaves the quantity unchanged (self-assignment); only
        # 'add2' actually increments — presumably intentional, confirm.
        item.item_quantity = item.item_quantity
    elif product_action == 'add2':
        item.item_quantity = item.item_quantity + 1
    else:
        item.item_quantity = item.item_quantity- 1
        print('-')
    item.save()
    print(item.item_quantity)
    # a quantity that reached zero removes the line item entirely
    if item.item_quantity == 0:
        item.delete()
    if product_action == 'delete':
        item.delete()
    return JsonResponse('added',safe=False)
def country_name_from_json(request,*args,**kwargs):
    """Ajax endpoint: return the states belonging to the country named in the URL."""
    chosen_country = kwargs.get('country')
    matching_states = state.objects.filter(state_country__country_name=chosen_country)
    return JsonResponse({'data': list(matching_states.values())})
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_order(request):
    """Finalize the authenticated user's open order.

    Expects a JSON body with data['shippingData'] (total plus address
    fields). Marks the order completed when the client-reported total
    matches the computed net total, then records the shipping info.
    Always responds with a JSON confirmation string.
    """
    transaction_id2=datetime.datetime.now().timestamp()
    data=json.loads(request.body)
    print(data)
    print(transaction_id2)
    userc=request.user
    print(userc)
    if userc.is_authenticated:
        user=request.user
        print(user)
        customer=userprofile.objects.get(username=user)
        print(customer)
        # BUG FIX: field name was misspelled 'order_compcleted'; every other
        # query in this module uses 'order_completed'.
        Order=order.objects.get(order_customer=customer,order_completed=False)
        total=float(data['shippingData']['total'])
        print(total)
        # BUG FIX: the transaction id was assigned to the model class
        # ('order'), not the fetched instance, so it was never persisted.
        Order.transaction_id=transaction_id2
        item=orderItem.objects.filter(item_order=Order)
        net_total=sum(items.total_selected_item for items in item)
        # NOTE(review): exact float equality — confirm totals are exact values.
        if total == net_total:
            Order.order_completed=True
            Order.save()
            shiiping=shippinginfo.objects.create(
            shipping_user=customer,shipping_order=Order,shipping_mail=data['shippingData']['email'],
            title=data['shippingData']['title'],shipping_firstname=data['shippingData']['firstname'],shiping_middlename=data['shippingData']['middlename'],
            shipping_lastname=data['shippingData']['lastname'],shiping_adress1=data['shippingData']['adress1'],shipping_adress2=data['shippingData']['adress2'],
            shipping_zipcode=data['shippingData']['zipcode'],shipping_country=data['shippingData']['country'],shipping_state=data['shippingData']['state'],
            shipping_phone=data['shippingData']['phone'],shipping_mobile_number=data['shippingData']['mobile_number'],shipping_fax=data['shippingData']['fax']
            )
            # objects.create() already saves; kept as a harmless no-op.
            shiiping.save()
    else:
        print('user not logged in')
    return JsonResponse('payment submitted.........',safe=False)
|
[
"you@example.com"
] |
you@example.com
|
4bbb61036ed3e1205a84859392f799268266563b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/3230.py
|
8f3a1c414936539dd2e442282a4b352c344049da
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
'''
Created on Apr 8, 2016
@author: Thomas
'''
import re
import sys
def flip_stack(s):
    '''Return s with every '+' turned into '-' and every '-' into '+'.'''
    swap = {'+': '-', '-': '+'}
    return ''.join(swap.get(ch, ch) for ch in s)


def flip(stack, k):
    '''Flip the k-wide window starting at the first '-', shifting the
    window left when it would run past the end of the stack.'''
    left = stack.find("-")
    right = left + k
    overshoot = right - (len(stack) - 1)
    if overshoot > 0:
        left, right = left - overshoot, right - overshoot
    window = flip_stack(stack[left:right])
    return stack[:left] + window + stack[right:]
def flip_decision(stack, k, num_flips=0):
    '''decide what to flip, do the flip, and continue until all happy faces

    Recursively flips k-wide runs until no '-' remains; returns the flip
    count, or "IMPOSSIBLE" once the count exceeds the stack length.
    (Python 2 print statements are debug output left in by the author.)
    '''
    print stack
    if "-" in stack:
        # Not all Happy Face Pancakes
        if ('-' * k) in stack:
            # Fast path: each whole run of k sad faces flips in one move.
            num_occ = stack.count('-' * k)
            stack = stack.replace(('-' * k), ('+' * k))
            num_flips += num_occ
        elif stack.find("-") >= 0:
            print "pre" + stack
            stack = flip(stack, k)
            num_flips += 1
            print "pos" + stack
        # Heuristic cut-off: more flips than pancakes means no solution.
        if num_flips > len(stack):
            return "IMPOSSIBLE"
        return flip_decision(stack, k, num_flips)
    else:
        return num_flips
if __name__ == '__main__':
    out = {}
    # Input file: first line is the case count, which is skipped.
    with open("A-small-attempt2.in", 'rb') as f:
        lines = f.readlines()[1:]
    for idx,line in enumerate(lines):
        line = line.rstrip()
        # Each line: a pancake string of +/- followed by the flipper size k.
        pancakes = re.search("[+-]+", line).group(0)
        k = int(re.search("[0-9]+", line).group(0))
        print line + str("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
        num_flips = flip_decision(pancakes, k)
        out[idx+1] = num_flips
    # Emit results in Code Jam "Case #n: answer" format.
    with open("output.out", 'w') as f:
        f.write("")
        for key, val in out.iteritems():
            line = "Case #" + str(key) + ": " + str(val) + "\n"
            f.write(line)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1e5e01255835cf813cfcd8b8b7518254fa4f2372
|
e9173667eec2576782863a51ee63672f9b419297
|
/p56.py
|
e81a5ce0a7732cbeb84457ad982a93ae8b778e8b
|
[] |
no_license
|
sabareesh123/pythonprogamming
|
d41c23ddae183ded09eafde445273126c6b56fcf
|
004f248aa2e25f2855d6ccafbb9244447bfb5873
|
refs/heads/master
| 2020-05-30T06:28:54.901030
| 2019-08-06T11:50:10
| 2019-08-06T11:50:10
| 189,580,451
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
#B
# Read one line and report whether it contains at least one letter or digit.
q = input()
count3 = sum(1 for ch in q if ch.isdigit() or ch.isalpha())
print("Yes" if count3 != 0 else "No")
|
[
"noreply@github.com"
] |
sabareesh123.noreply@github.com
|
50014adc7f08c346171e0509cbe789c8a4a21a53
|
098662ca9c95151e669753e246d7c158dccad201
|
/temp/playAIvsAI100.py
|
98a645521ab3b1639d89409f7460d2bd114f93f7
|
[] |
no_license
|
ava9/CS6700
|
92dd92081614c3596b880de204e72d3098d85f2f
|
bcad9094a48784635ae8e6081cea4267e3729df0
|
refs/heads/master
| 2021-03-27T18:13:43.680760
| 2017-05-22T16:19:07
| 2017-05-22T16:19:07
| 84,686,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
from board import board
from human import humanInput
from decentAI import decentAI
from randomAI import randomAI
from inorderAI import inorderAI
from minimaxAI100 import minimaxAI100
from miniAIb100 import miniAIb100
from uctAI import uctAI
import random
class play:
    """Drives one full game between two AIs on a 7x6 board (Python 2)."""
    # Class-level defaults; current/win are re-initialised per instance.
    p1 = 1
    p2 = -1
    current = p1
    win = 0
    userInput = humanInput()
    # NOTE: the board is a CLASS attribute, shared across play() instances.
    b = board(7, 6)
    def __init__(self):
        self.current = self.p1
        self.win = 0
    def begin(self, whoGoesFirst):
        # Runs the game loop; whoGoesFirst == 1 makes player 2 (-1) move first.
        #print "Would you like to go first? Enter: [y/n]"
        #note that if user enters anything other than "n", user goes first
        if (whoGoesFirst == 1):
            valid = True
            self.current = self.p2
        ai = True
        if (ai == True):
            # minimax AI plays as 1, its 'b' variant plays as -1
            opp = minimaxAI100() #1
            opp2 = miniAIb100() #-1
            depth = 4
            depth2 = 5
        while(self.win == 0):
            self.b.update()
            if self.b.boardFull() == True:
                break
            if (ai == True):
                if (self.current < 0):
                    #print "--------AI 2's Move-------"
                    # 1
                    self.b.move(self.current, opp2.chooseMove(self.b, self.current, depth2))
                elif (self.current > 0):
                    self.b.move(self.current, opp.chooseMove(self.b, self.current, depth))
                    valid = True
                    #print "------AI 1's Move------"
                    # -1
            elif not ai:
                valid = True
            self.win = self.b.winner(self.current)
            if (valid == False):
                continue
            else:
                # alternate players
                self.current = self.current * -1
        self.b.update()
        # update print statement to print ai/user won
        #print opp.uctTree
        #opp.writeTree()
        #print"The winner is "
        print self.win
# playAgain = True
# count = 0
# while(playAgain == True):
# count = count + 1
# Script entry: run a single AI-vs-AI game, then reset the shared board.
p = play()
# if (count <=50):
p.begin(0)
# else:
# p.begin(1)
# #print "Would you like to play again? Enter: [y/n]"
# #note that if user enters anything other than "n", user plays again
# #if (raw_input() == "n"):
# #playAgain = False
# if (count > 100):
# playAgain = False
# else:
p.b.setUp()
|
[
"szl5@cornell.edu"
] |
szl5@cornell.edu
|
f696ff8d7d9240fa81168d2453e6f4cc46a5e659
|
555b9f764d9bca5232360979460bc35c2f5ad424
|
/google/ads/google_ads/v2/proto/services/ad_group_ad_asset_view_service_pb2_grpc.py
|
1f8bcffe7cd224c6699d2bc6688076e783c5247a
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
juanmacugat/google-ads-python
|
b50256163782bc0223bcd8b29f789d74f4cfad05
|
0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a
|
refs/heads/master
| 2021-02-18T17:00:22.067673
| 2020-03-05T16:13:57
| 2020-03-05T16:13:57
| 245,215,877
| 1
| 0
|
Apache-2.0
| 2020-03-05T16:39:34
| 2020-03-05T16:39:33
| null |
UTF-8
|
Python
| false
| false
| 2,412
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v2.proto.resources import ad_group_ad_asset_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__ad__asset__view__pb2
from google.ads.google_ads.v2.proto.services import ad_group_ad_asset_view_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__ad__asset__view__service__pb2
class AdGroupAdAssetViewServiceStub(object):
    """Proto file describing the ad group ad asset view service.

    Service to fetch ad group ad asset views.
    """
    # NOTE: generated by the gRPC Python protocol compiler — do not hand-edit.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC callable fetching a single AdGroupAdAssetView.
        self.GetAdGroupAdAssetView = channel.unary_unary(
            '/google.ads.googleads.v2.services.AdGroupAdAssetViewService/GetAdGroupAdAssetView',
            request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__ad__asset__view__service__pb2.GetAdGroupAdAssetViewRequest.SerializeToString,
            response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__ad__asset__view__pb2.AdGroupAdAssetView.FromString,
        )
class AdGroupAdAssetViewServiceServicer(object):
    """Proto file describing the ad group ad asset view service.

    Service to fetch ad group ad asset views.
    """
    # NOTE: generated by the gRPC Python protocol compiler — do not hand-edit.

    def GetAdGroupAdAssetView(self, request, context):
        """Returns the requested ad group ad asset view in full detail.
        """
        # Default stub implementation: report UNIMPLEMENTED to the client.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AdGroupAdAssetViewServiceServicer_to_server(servicer, server):
    # Registers the servicer's handlers with a grpc.Server under the fully
    # qualified service name. (Generated code — do not hand-edit.)
    rpc_method_handlers = {
        'GetAdGroupAdAssetView': grpc.unary_unary_rpc_method_handler(
            servicer.GetAdGroupAdAssetView,
            request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_ad__group__ad__asset__view__service__pb2.GetAdGroupAdAssetViewRequest.FromString,
            response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_ad__group__ad__asset__view__pb2.AdGroupAdAssetView.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.ads.googleads.v2.services.AdGroupAdAssetViewService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
[
"noreply@github.com"
] |
juanmacugat.noreply@github.com
|
31504b49f2f0d932d6d843c066bd85325a8a5feb
|
00eb801cfd1e4b93f2db564ac8d0b30bdefca90b
|
/githubapi.py
|
67ab73ed8cef04090d15749fbd4f7f7f9753e27c
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
cclauss/repo-tools
|
048f376f17f49ebbb028a5ddcb354ea3fb2e3db4
|
3fb40a6da5191fbdda91f3a6a4b1b0b91d7cf18f
|
refs/heads/master
| 2020-04-16T11:34:21.676617
| 2019-01-03T22:01:19
| 2019-01-03T22:01:19
| 165,541,674
| 0
| 0
|
Apache-2.0
| 2019-01-13T18:49:20
| 2019-01-13T18:49:20
| null |
UTF-8
|
Python
| false
| false
| 5,651
|
py
|
"""Access to information using the GitHub API."""
from __future__ import print_function
import operator
import pprint
import dateutil.parser
from urlobject import URLObject
from helpers import paginated_get, requests
from models import PullRequestBase
class JsonAttributeHelper(object):
    """Mixin mapping attribute access onto stored JSON dictionaries."""

    @classmethod
    def from_json(cls, issues_data):
        """Yield an instance for each JSON object the class wants."""
        for candidate in issues_data:
            if cls.want_this_json_object(candidate):
                yield cls(candidate)

    @classmethod
    def want_this_json_object(cls, obj):
        # Subclasses override this to filter out unwanted objects.
        return True

    def attribute_lookup(self, name, field_map, mapped_fields=None):
        """Resolve `name` against the first (field-set, json-dict) pair in
        field_map that declares it; '*_at' values are parsed to datetimes."""
        source = next((data for fields, data in field_map if name in fields), None)
        if source is None:
            raise AttributeError("Nope: don't have {!r} attribute on {}".format(name, self.__class__.__name__))
        if mapped_fields:
            name = mapped_fields.get(name, name)
        val = self.deep_getitem(source, name)
        if name.endswith('_at') and val is not None:
            val = dateutil.parser.parse(val)
        return val

    def deep_getitem(self, val, key):
        """Follow a dotted path through nested dicts; None short-circuits."""
        for part in key.split("."):
            if val is None:
                break
            val = val[part]
        return val
class PullRequest(JsonAttributeHelper, PullRequestBase):
    """A pull request backed by its GitHub issue JSON, with the richer
    pull-request JSON loaded lazily via load_pull_details()."""
    def __init__(self, issue_data):
        self._issue = issue_data
        if 0:
            # flip to 1 for debugging raw issue payloads
            print("---< Issue >---------------------------------")
            pprint.pprint(issue_data)
        # pull-request details are fetched on demand; see load_pull_details()
        self._pull = None
        # short_label presumably comes from PullRequestBase — confirm in models
        self.labels = [self.short_label(l['name']) for l in self.labels]
    @classmethod
    def want_this_json_object(cls, obj):
        # Issues that are really pull requests carry a 'pull_request' url.
        pr_url = obj.get('pull_request', {}).get('url')
        return bool(pr_url)
    # Attribute names resolved against the issue JSON.
    ISSUE_FIELDS = {
        'assignee_login',
        'closed_at',
        'comments',
        'comments_url',
        'created_at',
        'labels',
        'number',
        'pull_request_url',
        'pull_request_html_url',
        'state',
        'title',
        'updated_at',
        'user_html_url',
        'user_login',
    }
    # Attribute names resolved against the pull-request JSON (requires
    # load_pull_details() to have been called first).
    PULL_FIELDS = {
        'additions',
        'base_ref',
        'changed_files',
        'commits',
        'deletions',
        'merged_at',
    }
    # Flat attribute name -> dotted path inside the JSON object.
    MAPPED_FIELDS = {
        'assignee_login': 'assignee.login',
        'base_ref': 'base.ref',
        'pull_request_url': 'pull_request.url',
        'pull_request_html_url': 'pull_request.html_url',
        'user_login': 'user.login',
        'user_html_url': 'user.html_url',
    }
    def __getattr__(self, name):
        # Fall back to the JSON payloads for any attribute not set directly.
        return self.attribute_lookup(
            name,
            [(self.ISSUE_FIELDS, self._issue), (self.PULL_FIELDS, self._pull)],
            self.MAPPED_FIELDS
        )
    def load_pull_details(self, pulls=None):
        """Get pull request details also.

        `pulls` is a dictionary of pull requests, to perhaps avoid making
        another request.
        """
        if pulls:
            self._pull = pulls.get(self.number)
        if not self._pull:
            # not pre-fetched: make one extra API request for this PR
            self._pull = requests.get(self.pull_request_url).json()
        if 0:
            # flip to 1 for debugging raw pull-request payloads
            print("---< Pull Request >--------------------------")
            pprint.pprint(self._pull)
class Comment(JsonAttributeHelper):
    """A single issue comment, wrapping its JSON representation."""

    # Attribute names resolvable from the comment JSON.
    FIELDS = {
        'body',
        'created_at',
        'user_login',
    }

    def __init__(self, obj):
        self._comment = obj

    def __getattr__(self, name):
        # Unknown attributes are resolved out of the stored JSON payload.
        return self.attribute_lookup(
            name,
            [(self.FIELDS, self._comment)],
            {'user_login': 'user.login'},
        )
def get_pulls(owner_repo, labels=None, state="open", since=None, org=False, pull_details=None):
    """
    Get a bunch of pull requests (actually issues).

    `owner_repo` is the "owner/repo" string identifying the repository.
    `labels`, `state` and `since` filter the issue list as in the GitHub API;
    `org` sorts results by their "org" attribute. Yields PullRequest objects.

    `pull_details` indicates how much information you want from the associated
    pull request document. None means just issue information is enough. "list"
    means the information available when listing pull requests is enough. "all"
    means you need all the details. See the GitHub API docs for the difference:
    https://developer.github.com/v3/pulls/
    """
    url = URLObject("https://api.github.com/repos/{}/issues".format(owner_repo))
    if labels:
        url = url.set_query_param('labels', ",".join(labels))
    if since:
        url = url.set_query_param('since', since.isoformat())
    if state:
        url = url.set_query_param('state', state)
    url = url.set_query_param('sort', 'updated')
    issues = PullRequest.from_json(paginated_get(url))
    if org:
        # sorting forces the generator to be consumed here
        issues = sorted(issues, key=operator.attrgetter("org"))
    pulls = None
    if pull_details == "list":
        issues = list(issues)
        if issues:
            # Request a bunch of pull details up front, for joining to. We can't
            # ask for exactly the ones we need, so make a guess.
            limit = int(len(issues) * 1.5)
            pull_url = URLObject("https://api.github.com/repos/{}/pulls".format(owner_repo))
            if state:
                pull_url = pull_url.set_query_param('state', state)
            pulls = { pr['number']: pr for pr in paginated_get(pull_url, limit=limit) }
    for issue in issues:
        if pull_details:
            # joins against the pre-fetched dict when available
            issue.load_pull_details(pulls=pulls)
        issue.id = "{}.{}".format(owner_repo, issue.number)
        yield issue
def get_comments(pull):
    """Return the comments on *pull*, most recently created first."""
    comments_url = URLObject(pull.comments_url)
    comments_url = comments_url.set_query_param("sort", "created")
    comments_url = comments_url.set_query_param("direction", "desc")
    return Comment.from_json(paginated_get(comments_url))
|
[
"ned@nedbatchelder.com"
] |
ned@nedbatchelder.com
|
50a644d7e27f7cc2ea8d42c87fccae68515309ce
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_9/jnrkhy001/question2.py
|
d1392f0535dfa0b41ee30c71baaaf0b3483de830
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,547
|
py
|
# Khyati Jinerdeb
# Assignment 9
# Date: 17.05.2014
# to program a text file to make them of same length
def readF(filename):
    """Read *filename* and return its lines with newline characters removed."""
    with open(filename, "r") as source:
        # Iterating the handle yields one line at a time; strip every '\n'.
        return [line.replace('\n', '') for line in source]
def setLines(l, w):
    """Re-wrap the lines in *l* so each output line is at most *w* characters
    (plus a trailing space).  Blank/one-char source lines become explicit
    '\\n' paragraph-break entries.
    """
    wrapped = ['']
    idx = 0
    for line in l:
        if len(line) <= 1:
            # Too short to wrap: emit a paragraph break and start a new line.
            wrapped.append('\n')
            idx += 1
            continue
        for word in line.split(' '):
            if len(wrapped[idx]) + len(word) <= w:
                wrapped[idx] += word + " "
            elif word == 'a':
                # Quirk preserved from the original: a lone 'a' that would
                # overflow is squeezed onto the current line without a space.
                wrapped[idx] += word
            else:
                wrapped.append(word + " ")
                idx += 1
    return wrapped
def writeF(filename, lines):
    """Write each entry of *lines* to *filename*, one per line."""
    with open(filename, 'w') as out:
        for line in lines:
            # print() appends the trailing newline for us.
            print(line, file=out)
def main():
    """Prompt for input/output file names and a line width, then reformat."""
    inputF = input("Enter the input filename:\n")
    outputF = input("Enter the output filename:\n")
    # NOTE(review): eval() on raw user input can execute arbitrary code;
    # int(input(...)) would be safer — kept to preserve original behaviour.
    wid = eval(input("Enter the line width:\n"))
    writeF(outputF, setLines(readF(inputF), wid))

main()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
4fae9ac1bbf6df3f073e7624c668d4427c7807a7
|
9cff940d26e8c7ca7431c6d6516072c65cefa00c
|
/testRNN.py
|
16627f8f32b001e61b9a87911e6d7579e7942cfb
|
[] |
no_license
|
sumitparw/NLP_Sentiment-Analysis-using-lstm
|
4a90dd842e24592b432ef47113fa1f17a2c0f2cf
|
6c1cc7717999cb16089376fe27a1e48e5b8ce2c7
|
refs/heads/master
| 2020-12-26T22:44:08.935738
| 2020-02-01T20:21:53
| 2020-02-01T20:21:53
| 237,672,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
import nltk
import random
import pandas as pd
from nltk.tokenize import word_tokenize
import string
import re
import numpy as np
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import accuracy_score,f1_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,LSTM,Dropout
from keras.optimizers import Adam
class rnn():
    """Binary (positive/negative) sentiment classifier for reviews, built on a
    two-layer LSTM over a learned word embedding.

    Rows rated < 3 stars become "negative", > 3 "positive"; 3-star (neutral)
    reviews are dropped during preprocessing.
    """

    # index -> word vocabulary, populated by return_train_test_data_rnn
    word_dict = dict()
    # reviews are padded/truncated to this many tokens
    max_cap = 80

    def assign_label(self, x):
        """Map a data row's star rating (column index 2) to a label string."""
        if x[2] < 3.0: return "negative"
        elif x[2] > 3.0: return "positive"
        else: return "neutral"

    def clean_document(self, doco):
        """Lower-case *doco*, strip punctuation, and return a token list."""
        punctuation = string.punctuation + '\n\n'
        punc_replace = ''.join([' ' for s in punctuation])
        doco_clean = doco.replace('-', ' ')
        doco_alphas = re.sub(r'\W +', '', doco_clean)
        # Replace every punctuation character with a space of equal width.
        trans_table = str.maketrans(punctuation, punc_replace)
        doco_clean = ' '.join([word.translate(trans_table) for word in doco_alphas.split(' ')])
        doco_clean = doco_clean.split(' ')
        doco_clean = [word.lower() for word in doco_clean if len(word) > 0]
        return doco_clean

    def return_train_test_data_rnn(self, file_path):
        """Load the review CSV, encode/pad the text, and return a 70/15/15
        train/dev/test split as (X_train, Y_train, X_dev, Y_dev, X_test, Y_test).
        """
        df = pd.read_csv(file_path, header=None)
        df = df[df.columns[2:4]]
        df[2] = df.apply(self.assign_label, axis=1)
        # Neutral reviews carry no binary signal; drop them.
        inx = df[df[2] == 'neutral'].index
        df.drop(inx, inplace=True)
        df[2] = df[2].map({'negative': 0, 'positive': 1})
        reviews = np.array(df[3].to_list())
        labels = np.array(df[2].to_list())
        review_cleans = [self.clean_document(doc) for doc in reviews]
        sentences = [' '.join(r) for r in review_cleans]
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(sentences)
        text_sequences = np.array(tokenizer.texts_to_sequences(sentences))
        sequence_dict = tokenizer.word_index
        # Invert word -> index into index -> word for later inspection.
        self.word_dict = dict((num, val) for (val, num) in sequence_dict.items())
        reviews_encoded = []
        for i, review in enumerate(review_cleans):
            reviews_encoded.append([sequence_dict[x] for x in review])
        lengths = [len(x) for x in reviews_encoded]
        with plt.xkcd():
            plt.hist(lengths, bins=range(100))
        # NOTE: local max_cap shadows the class attribute of the same name.
        max_cap = 80
        X = pad_sequences(reviews_encoded, maxlen=max_cap, truncating='post')
        # One-hot targets: negative -> [0, 1], positive -> [1, 0].
        Y = np.array([[0, 1] if label == 0 else [1, 0] for label in labels])
        np.random.seed(1024)
        random_posits = np.arange(len(X))
        np.random.shuffle(random_posits)
        # Shuffle X and Y with the same permutation.
        X = X[random_posits]
        Y = Y[random_posits]
        # Divide the reviews into Training, Dev, and Test data.
        train_cap = int(0.70 * len(X))
        dev_cap = int(0.85 * len(X))
        X_train, Y_train = X[:train_cap], Y[:train_cap]
        X_dev, Y_dev = X[train_cap:dev_cap], Y[train_cap:dev_cap]
        X_test, Y_test = X[dev_cap:], Y[dev_cap:]
        return X_train, Y_train, X_dev, Y_dev, X_test, Y_test

    def build_model(self):
        """Construct and return the (uncompiled) Keras model."""
        model = Sequential()
        model.add(Embedding(len(self.word_dict), self.max_cap, input_length=self.max_cap))
        model.add(LSTM(80, return_sequences=True, recurrent_dropout=0.2))
        model.add(Dropout(0.2))
        model.add(LSTM(80, recurrent_dropout=0.2))
        model.add(Dense(80, activation='relu'))
        model.add(Dense(2, activation='softmax'))
        print(model.summary())
        return model

    def train_model(self, X_train, Y_train, X_dev, Y_dev):
        """Compile and fit the model for one epoch; return the trained model."""
        model = self.build_model()
        optimizer = Adam(lr=0.01, decay=0.001)
        model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        # fit model
        model.fit(X_train, Y_train, batch_size=600, epochs=1, validation_data=(X_dev, Y_dev))
        return model

    def predict(self, X_test, model):
        """Return predicted class indices for X_test."""
        predictions = model.predict_classes(X_test)
        return predictions

    def accuracy(self, predictions, X_test, Y_test, model):
        """Return the accuracy of *predictions* against Y_test (also prints F1)."""
        # Convert one-hot Y_test rows back to class indices.
        actuals = [0 if y[0] == 1 else 1 for y in Y_test]
        # BUG FIX: the original concatenated the f1_score *function object*
        # into a string ("f1_score:"+f1_score), raising TypeError.  Compute
        # the score before printing.
        print("f1_score:" + str(f1_score(actuals, predictions)))
        # Use SkLearn's Metrics module
        return accuracy_score(predictions, actuals)
|
[
"46736751+sumitparw@users.noreply.github.com"
] |
46736751+sumitparw@users.noreply.github.com
|
8fa99a71c36d7d8bfe0c03af05d83e0f8ab3dbb3
|
56cdf15ecf8621a7d64eee1fcac8c05a7bb227b4
|
/setup.py
|
ca019200306a195b1dbfad27c4b3d04011638dde
|
[
"Apache-2.0"
] |
permissive
|
pombredanne/google-resumable-media-python
|
076ec91e0b81999c0571009d30eb4649f4be3e39
|
c158f0f2e43d2730350bd1fbcce4ddde35c4aa96
|
refs/heads/master
| 2021-07-11T04:37:58.824232
| 2017-10-12T16:44:05
| 2017-10-13T17:17:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
# Absolute path of the directory containing this setup.py.
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))

# The long description displayed on PyPI comes straight from the README.
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
    README = file_obj.read()

# Hard runtime dependencies.
REQUIREMENTS = [
    'six',
]

# Optional extras: installed via ``pip install google-resumable-media[requests]``.
EXTRAS_REQUIRE = {
    'requests': [
        'requests >= 2.18.0, < 3.0.0dev',
    ],
}

setuptools.setup(
    name='google-resumable-media',
    version='0.3.0',
    description='Utilities for Google Media Downloads and Resumable Uploads',
    author='Google Cloud Platform',
    author_email='googleapis-publisher@google.com',
    long_description=README,
    namespace_packages=['google'],
    scripts=[],
    url='https://github.com/GoogleCloudPlatform/google-resumable-media-python',
    packages=setuptools.find_packages(exclude=('tests*',)),
    license='Apache 2.0',
    platforms='Posix; MacOS X; Windows',
    include_package_data=True,
    zip_safe=False,
    install_requires=REQUIREMENTS,
    extras_require=EXTRAS_REQUIRE,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet',
    ],
)
|
[
"daniel.j.hermes@gmail.com"
] |
daniel.j.hermes@gmail.com
|
1009696b09ebe1c6c83517db0ed3e096f49cd272
|
697c7514abc80e53dab70f22177c649d499500ce
|
/0015/A0015.py
|
e5f4ea89406e03fafc27a45f034c554f39ce471a
|
[] |
no_license
|
aliceqin12/ShowMeTheCode
|
c2fbcf82090c7ccea47936f73c35efbfe927fc28
|
4d52e5b331912a6cc0f2dd842939067d0d2507d9
|
refs/heads/master
| 2021-07-01T19:24:04.602407
| 2017-09-20T02:09:59
| 2017-09-20T02:09:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
import json
import xlrd
import xlwt
def writeCityDataToExcel(city_data):
    """Dump the {code: city_name} mapping into city.xls, one pair per row."""
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('city')
    for row, (code, city_name) in enumerate(city_data.items()):
        sheet.write(row, 0, code)
        sheet.write(row, 1, city_name)
    workbook.save('city.xls')
if __name__ == '__main__':
    # Load the JSON city mapping and export it to an Excel sheet.
    input_path = 'city.txt'
    with open(input_path, 'r', encoding='utf-8') as fh:
        writeCityDataToExcel(json.load(fh))
|
[
"geekworldzhang@163.com"
] |
geekworldzhang@163.com
|
fc78ea23f77df88d8c88ccf06ee7ba2efa3455b1
|
d01d4fe61ff5161cfc00ff85fc0abc616b82f78e
|
/Programs/Oops/destuctor2.py
|
70cf14616b802b7289e2df46787f867e5a102481
|
[] |
no_license
|
Susama91/Project
|
8f14feadea104b6e258f9a3c4678e67da65c24ba
|
a580c29bf92403fc84c99514e918d8994126f7b1
|
refs/heads/master
| 2020-05-15T01:56:11.035480
| 2019-04-19T07:13:43
| 2019-04-19T07:13:43
| 182,039,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
class x:
    # Minimal class for observing object lifetime via stdout messages.
    def __init__(self):
        # Announces every construction.
        print("in constructor of x")
    def m1(self):
        print("in m1 of x")
    def __del__(self):
        # Runs when the reference count drops to zero in CPython; timing is
        # implementation-specific on other interpreters.
        print("in destructor of x")
# Demo script: rebinding names shows exactly when __del__ fires (CPython
# refcounting — destructor messages interleave with the constructor output).
x1=x()
print(x1)
x2=x1  # alias: same object, no new construction
print(x2)
x3=x2  # three names now reference the first instance
print(x3)
x1=x()  # rebinding x1 makes a new object; the first survives via x2/x3
print(x1)
x2=x()  # another new object; the first instance still held by x3
print(x2)
x3=x()  # last reference to the first instance dropped -> its destructor runs
print(x3)
|
[
"susama00@gmail.com"
] |
susama00@gmail.com
|
52e02810f632692cc57531ee6f8a11cd0b629405
|
8a45adaed54a171a508da5bd855d20ee727846f0
|
/userauth/migrations/0004_auto_20191205_2057.py
|
1f51eef4d9fdae5491ec712f3f3bc6e3f23e967a
|
[] |
no_license
|
Aksa123/ecommerce
|
4b73571eb92ec3b36a3321cd368fbe40874b68bc
|
5de73daa318ab90cdf864600de6644266dc56ed5
|
refs/heads/master
| 2020-09-28T13:20:45.848574
| 2019-12-09T05:04:02
| 2019-12-09T05:04:02
| 226,787,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
# Generated by Django 2.2.7 on 2019-12-05 13:57
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated schema migration; do not alter logic once applied.
    dependencies = [
        ('userauth', '0003_auto_20191205_2051'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='birthday',
            # NOTE(review): the default is a timestamp frozen at generation
            # time (a datetime value, not a callable like timezone.now) —
            # standard for auto-generated migrations, but every new row gets
            # this exact historical instant as its default.
            field=models.DateField(default=datetime.datetime(2019, 12, 5, 13, 57, 55, 294801, tzinfo=utc)),
        ),
    ]
|
[
"="
] |
=
|
95faccea24e6b2ab12d71dc79deb1d28e75712e8
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/keras/applications/xception.py
|
d4a2c3f668cfdab2d8c3b86a1e4fca1705ffeb94
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:5fc4a23b6bf0c06ea81c0d7f377ba3b35b99e19b6f9f35c06bdc98df2ce6e102
size 1649
|
[
"github@cuba12345"
] |
github@cuba12345
|
6c2b89f0c4abae0a19d4ed88a0ec61d2b0381a44
|
a3c16ce3fedb4c2b0b4fbe002738b423e58f3c2e
|
/venv/Scripts/temboo/Library/Google/Directions/GetBicyclingDirections.py
|
cde366c29ff3ab8d08f58f44735e9d9e575eb3cd
|
[] |
no_license
|
DevHyperCoder/News_Manager
|
45b05061db5be8bb32f1485ff5480d4aa6145b3f
|
88b54c3d1995b8f015dc03ac30657e6f9777f3aa
|
refs/heads/master
| 2020-08-21T14:22:57.248732
| 2019-10-19T09:14:45
| 2019-10-19T09:14:45
| 216,178,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,411
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetBicyclingDirections
# Generate biking directions between two locations, denoted by address or latitude/longitude coordinates.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetBicyclingDirections(Choreography):
    # Auto-generated Temboo Choreo wrapper for Google Directions (bicycling).
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetBicyclingDirections Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetBicyclingDirections, self).__init__(temboo_session, '/Library/Google/Directions/GetBicyclingDirections')
    def new_input_set(self):
        # Factory for the container used to set this Choreo's input parameters.
        return GetBicyclingDirectionsInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific ResultSet.
        return GetBicyclingDirectionsResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Wrap an in-flight execution handle.
        return GetBicyclingDirectionsChoreographyExecution(session, exec_id, path)
class GetBicyclingDirectionsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetBicyclingDirections
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Auto-generated: every setter forwards its value to InputSet._set_input
    # under the corresponding Temboo input name.
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Google.)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('APIKey', value)
    def set_Alternatives(self, value):
        """
        Set the value of the Alternatives input for this Choreo. ((optional, string) If set to true, additional routes will be returned.)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Alternatives', value)
    def set_Destination(self, value):
        """
        Set the value of the Destination input for this Choreo. ((required, string) Enter the address or latitude/longitude coordinates from which directions will be generated (i.e."104 Franklin St, New York, NY" or "40.7160,-74.0037").)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Destination', value)
    def set_Origin(self, value):
        """
        Set the value of the Origin input for this Choreo. ((required, string) Enter the address or latitude/longitude coordinates from which directions will be computed (i.e."104 Franklin St, New York, NY" or "40.7160,-74.0037").)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Origin', value)
    def set_Region(self, value):
        """
        Set the value of the Region input for this Choreo. ((optional, string) Enter the region code for the directions, specified as a ccTLD two-character value.)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Region', value)
    def set_Sensor(self, value):
        """
        Set the value of the Sensor input for this Choreo. ((optional, boolean) Indicates whether or not the directions request is from a device with a location sensor. Value must be either 1 or 0. Defaults to 0 (false).)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Sensor', value)
    def set_Units(self, value):
        """
        Set the value of the Units input for this Choreo. ((optional, string) Specify the units to be used when displaying results. Options include, metric, or imperial.)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Units', value)
    def set_Waypoints(self, value):
        """
        Set the value of the Waypoints input for this Choreo. ((optional, string) Specify route waypoints, either by address, or latitude/longitude coordinates.)
        """
        super(GetBicyclingDirectionsInputSet, self)._set_input('Waypoints', value)
class GetBicyclingDirectionsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetBicyclingDirections Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON string into Python objects.
        # NOTE(review): the parameter name shadows the builtin ``str``;
        # harmless here, but renaming would alter the generated interface.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)
class GetBicyclingDirectionsChoreographyExecution(ChoreographyExecution):
    # Execution handle; only customizes which ResultSet wraps responses.
    def _make_result_set(self, response, path):
        return GetBicyclingDirectionsResultSet(response, path)
|
[
"arduinoleo88@gmail.com"
] |
arduinoleo88@gmail.com
|
24fcdb8ccf0280fb8ba2ed75a3851675935d6a4f
|
313bb88c43d74995e7426f9482c6c8e670fdb63c
|
/11_OOP/smartwatch.py
|
bb0fa9b462d04264234a57f615c8c9b8574a8d84
|
[] |
no_license
|
martakedzior/python-course
|
8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654
|
3af2296c2092023d91ef5ff3b4ef9ea27ec2f227
|
refs/heads/main
| 2023-05-06T07:26:58.452520
| 2021-05-26T16:50:26
| 2021-05-26T16:50:26
| 339,822,876
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
class UsefulStuff:
    """Root of the demo hierarchy; announces the gadget's usefulness."""
    def __init__(self, name):
        # f-string form produces the same output as print(name, '...').
        print(f"{name} is used to make life easier!")
class Watch(UsefulStuff):
    """A watch: announces itself, then cooperatively continues up the MRO."""
    def __init__(self, watch_name):
        print(f"{watch_name} is small and convenient")
        super().__init__(watch_name)
class Phone(UsefulStuff):
    """A phone: announces itself, then cooperatively continues up the MRO."""
    def __init__(self, phone_name):
        print(f"{phone_name} can make a call")
        super().__init__(phone_name)
class SmartWatch(Watch, Phone):
    # Diamond-inheritance demo: MRO is SmartWatch -> Watch -> Phone -> UsefulStuff.
    def __init__(self):
        print('Smartwatch is great!')
        # Cooperative super() call: Watch.__init__ runs next in the MRO, whose
        # own super() then reaches Phone, and finally UsefulStuff.
        super().__init__('Smartwatch')
sw = SmartWatch()  # prints one line per class in the MRO
|
[
"marta.kedzior@wp.pl"
] |
marta.kedzior@wp.pl
|
ee2492091381225d0905da86eba4bf1a846bc850
|
eea70db78a214217ba41801d870aba127ba56c56
|
/Code/Untested SarsaZero and SarsaLambda/ARL_package/CodeFramework/PlotAgent.py
|
7ef19024638e26d23ff9fb33f386856cef932710
|
[] |
no_license
|
356255531/poppyProject
|
191b9a9e29817e3d6ce8c85dd5c0702982dd7157
|
678044afffa6390fac8cb402099bd32ae72d8a33
|
refs/heads/master
| 2021-01-21T14:32:47.373344
| 2016-07-12T19:42:25
| 2016-07-12T19:42:25
| 58,334,432
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,276
|
py
|
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
from copy import deepcopy
class PlotAgent(object):
    """Visualize RL training progress: per-episode diagnostics, Q-function
    convergence, and the most recent policy as a quiver plot.

    Diagramm:
        Horizontal: Episoid Number
        Vertical:
            1. Step Number
            2. Total Reward
            3. If reach center
            4. Q function difference every 100 episoid
    Graph:
        Policy after 100 Episoid
    """
    def __init__(self, dimension):
        # Grid-world extent (width, height); used to size the policy plot axes.
        self.dimension = dimension

    def get_qFun_diff(self, qFuncHistory):
        """Return the Euclidean distance between each pair of consecutive
        Q-function snapshots in *qFuncHistory*.

        Each snapshot is a dict mapping state -> {action: value}; snapshots
        are assumed to share the same key ordering.
        """
        qFunDiff = []
        qFuncPre = []
        qFuncCurrent = []
        for snapshot in qFuncHistory:
            qFuncPre = qFuncCurrent
            qFuncCurrent = snapshot
            if len(list(qFuncPre)) == 0:
                # First snapshot: nothing to diff against yet.
                continue
            temp = 0
            for x, y in zip(qFuncPre.values(), qFuncCurrent.values()):
                # BUG FIX: wrap dict views in list() so np.array() works on
                # Python 3 as well as Python 2 (original used bare .values()).
                temp += (np.array(list(x.values()), dtype=float)
                         - np.array(list(y.values()), dtype=float)) ** 2
            qFunDiff.append(np.sqrt(sum(temp)))
            qFuncPre = qFuncCurrent
        return qFunDiff

    def plot_policy_graph(self, policyHistory):
        """Draw the most recent policy as a quiver plot, one arrow per state."""
        for singlePolicy in reversed(policyHistory):
            soaList = []
            for state in singlePolicy.keys():
                if state == (0, 0):
                    # Goal state: no meaningful action arrow.
                    continue
                action = singlePolicy[state]
                x, y = state
                m, n = action
                soaList.append([x, y, m, n])
            X, Y, U, V = zip(*soaList)
            plt.figure()
            ax = plt.gca()
            ax.quiver(X, Y, U, V, angles='xy', scale_units='xy', scale=1)
            ax.set_xlim([-list(self.dimension)[0] // 2,
                         list(self.dimension)[0] // 2 + 1])
            ax.set_ylim([-list(self.dimension)[1] // 2,
                         list(self.dimension)[1] // 2 + 1])
            # Only the newest policy is drawn.
            break
        plt.draw()
        plt.show()

    def plot_diag(self, diagInfo, qFuncDiff):
        """Plot step counts, total rewards and Q-function deltas per episode.

        *diagInfo* is a sequence of (step_count, total_reward, reached_goal)
        triples; *qFuncDiff* comes from get_qFun_diff().
        """
        stepNumTrue = []
        totalReward = []
        ifReachCenter = []
        for i in diagInfo:
            stepNumTrue.append(list(i)[0])
            totalReward.append(list(i)[1])
            ifReachCenter.append(list(i)[2])
        stepNumFalse = deepcopy(stepNumTrue)
        # Split step counts into "reached goal" (blue) / "failed" (red) series.
        # BUG FIX: xrange() does not exist on Python 3; enumerate is idiomatic
        # and works on both versions.
        for i, reached in enumerate(ifReachCenter):
            if reached:
                stepNumFalse[i] = 0
            else:
                stepNumTrue[i] = 0
        length = np.arange(1, len(diagInfo) + 1)
        plt.subplot(3, 1, 1)
        plt.plot(length, stepNumFalse, 'r')
        plt.plot(length, stepNumTrue, 'b')
        plt.title('How many steps does learning algorithm need to reach Terminal or failed')
        plt.ylabel('Step Number')
        plt.subplot(3, 1, 2)
        plt.plot(length, totalReward, 'k')
        plt.title('How much is the total reward in one episoid')
        plt.ylabel('Reward')
        length = np.arange(1, len(qFuncDiff) + 1)
        plt.subplot(3, 1, 3)
        plt.plot(length, qFuncDiff, 'g-')
        plt.title('How big is the difference of Q function in every 10 episoids')
        plt.ylabel('Difference')
        plt.show()

    def plot(self, diagInfo, qFuncHistory, policyHistory):
        """Compute Q-function deltas, then render diagnostics and the policy."""
        qFuncDiff = self.get_qFun_diff(qFuncHistory)
        self.plot_diag(diagInfo, qFuncDiff)
        self.plot_policy_graph(policyHistory)
if __name__ == '__main__':
    # Standalone smoke test: draw three arrows from the origin with quiver.
    import numpy as np
    import matplotlib.pyplot as plt
    # Rows are [x, y, u, v]: arrow origin and direction components.
    soa =np.array( [ [0,0,3,2], [0,0,1,1],[0,0,9,9]])
    X,Y,U,V = zip(*soa)
    plt.figure()
    ax = plt.gca()
    ax.quiver(X,Y,U,V,angles='xy',scale_units='xy',scale=1)
    ax.set_xlim([-1,10])
    ax.set_ylim([-1,10])
    plt.draw()
    plt.show()
|
[
"hanzw356255531@icloud.com"
] |
hanzw356255531@icloud.com
|
4a3a05b184e11d4858f1f956115c5dd9c78fc203
|
3d8027f2ef3f723e13b31e056d0c03da4ed74aa8
|
/08-09-20(Day-14)/EmailSend/FirstApp/migrations/0002_auto_20200909_1645.py
|
7a1f067a5b808a0614207c3246015e92d575d536
|
[] |
no_license
|
satyavani462/Django-Batch5
|
2efbc99223008954896667dee46d2606b6559c82
|
1b975bc21e7fdeed11bef7505d22d4fed126656c
|
refs/heads/master
| 2022-12-08T19:57:33.996903
| 2020-09-10T14:23:15
| 2020-09-10T14:23:15
| 294,688,262
| 1
| 0
| null | 2020-09-11T12:22:16
| 2020-09-11T12:22:15
| null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
# Generated by Django 3.0.8 on 2020-09-09 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration; do not alter logic once applied.
    dependencies = [
        ('FirstApp', '0001_initial'),
    ]
    operations = [
        # Both fields gain a uniqueness constraint at the database level.
        migrations.AlterField(
            model_name='emailuser',
            name='email',
            field=models.EmailField(max_length=50, unique=True),
        ),
        migrations.AlterField(
            model_name='emailuser',
            name='username',
            field=models.CharField(max_length=50, unique=True),
        ),
    ]
|
[
"nivas0803@gmail.com"
] |
nivas0803@gmail.com
|
3ac8aea314fa6a6da561563e62a994edd3cbe06d
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/GZip/Scripts/UnzipGZFile/UnzipGZFile.py
|
2e9d7d2a0e3b9413aa048c73acff6dc6785cf495
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import gzip
import re
import shutil
from os.path import isfile
ESCAPE_CHARACTERS = r'[/\<>"|?*]'


def escape_illegal_characters_in_file_name(file_name: str) -> str:
    """Replace filesystem-hostile characters with '-' and collapse dash runs."""
    if not file_name:
        # Empty/None input is returned untouched.
        return file_name
    sanitized = re.sub(ESCAPE_CHARACTERS, '-', file_name)
    # Collapse any run of dashes (pre-existing or just produced) to one dash.
    return re.sub(r'-+', '-', sanitized)
def gzip_file(fileEntryID: str):
    """Fetch each war-room file entry, gunzip it, and return the result.

    ``fileEntryID`` may be one entry id or a comma-separated list (argToList).
    Each decompressed file is posted back via demisto.results(); the returned
    CommandResults lists the unzipped file names under
    UnzipGZFile.UnzippedGZFiles.
    """
    entry_ids = argToList(fileEntryID)
    file_names = list()
    for entry_id in entry_ids:
        # Resolve the entry id to a path on the server's file system.
        res = demisto.executeCommand('getFilePath', {'id': entry_id})
        if is_error(res):
            raise DemistoException(
                'Failed to get the file path for entry: ' + entry_id + ' the error message was ' + get_error(res))
        filePath = res[0]['Contents']['path']
        # Sanitize the display name before using it as a local file name.
        fileCurrentName = escape_illegal_characters_in_file_name(res[0]['Contents']['name'])
        if not isfile(filePath):  # in case that the user will send a directory
            raise DemistoException(entry_id + ' is not a file. Please recheck your input.')
        # Handling duplicate names.
        if fileCurrentName in file_names:
            name, ext = os.path.splitext(fileCurrentName)
            i = 0
            while fileCurrentName in file_names:
                i += 1
                fileCurrentName = f'{name} {i}{ext}'
        # copying the file to current location
        shutil.copy(filePath, fileCurrentName)
        file_names.append(fileCurrentName)
    unzippedgzFileNames = []
    for file_name in file_names:
        # Strip the trailing '.gz' (3 chars) for the output file name.
        with gzip.open(file_name, 'r') as f_in, open(file_name[:-3], 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
        with open(file_name[:-3], 'rb') as f:
            file_data = f.read()
        # Post the decompressed file back to the war room.
        demisto.results(fileResult(file_name[:-3], file_data))
        unzippedgzFileNames.append(file_name[:-3])
    readable_output = tableToMarkdown(name="Unzipped GZ Files",
                                      t=[{'Unzipped GZ File Names': unzippedgzFileNames, 'Original File Names': file_names}],
                                      removeNull=True)
    return CommandResults(
        outputs_prefix="UnzipGZFile.UnzippedGZFiles",
        outputs_key_field="UnzippedGZFiles",
        outputs=unzippedgzFileNames,
        readable_output=readable_output,
        raw_response={'UnzippedGZFiles': unzippedgzFileNames},
    )
def main():
    """Script entry point: unzip the .gz file(s) referenced by ``entryID``."""
    try:
        script_args = demisto.args()
        entry_id = script_args.get('entryID')
        if not entry_id:
            raise DemistoException('You must set an entryID when using the unzip GZ script')
        return_results(gzip_file(fileEntryID=entry_id))
    except Exception as exc:
        # Surface any failure to the platform instead of crashing the script.
        return_error(exc)


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
[
"noreply@github.com"
] |
demisto.noreply@github.com
|
3f08c6f4b90708762d29d9cca893e5352aefebb7
|
374d62b3aa78a2aa98077b28a1d78271d1e67a4a
|
/mike/db.py
|
0ef6bbc8339f00f6587fdff6ac35f25ba5778500
|
[
"MIT"
] |
permissive
|
emre/mike
|
b15fc3ea34072db9fa2d71b81828dda160803519
|
d682fa3385568d4f3d37b8e4e5578cc729c63dcc
|
refs/heads/master
| 2020-04-22T16:54:51.540783
| 2019-03-17T21:04:35
| 2019-03-17T21:04:35
| 170,524,004
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,910
|
py
|
import os
import dataset
class Database:
    """Thin wrapper around the ``dataset`` connection used by the bot."""

    def __init__(self, connection_uri=None):
        # Fall back to the MIKE_DB environment variable when no URI is given.
        uri = connection_uri or os.getenv("MIKE_DB")
        self.connection = dataset.connect(uri)

    @property
    def subscriptions(self):
        """The ``subscriptions`` table object."""
        return self.connection["subscriptions"]

    def subscribe(self, player_account, discord_account, discord_backend_id):
        """Register *discord_account* as a subscriber of *player_account*.

        :param player_account: Account name in STEEM blockchain
        :param discord_account: Discord ID. (foo#N)
        :return: None
        """
        row = dict(
            player_account=player_account,
            discord_account=discord_account,
            discord_backend_id=discord_backend_id,
        )
        self.subscriptions.insert(row)

    def unsubscribe(self, player_account, discord_account):
        """Remove the subscription of *discord_account* to *player_account*.

        :param player_account: Account name in STEEM blockchain
        :param discord_account: Discord ID. (foo#N)
        :return: None
        """
        self.subscriptions.delete(
            player_account=player_account,
            discord_account=discord_account,
        )

    def subscription_exists(self, player_account, discord_account):
        """Return True when the (player, discord) subscription already exists."""
        match = self.subscriptions.find_one(
            player_account=player_account,
            discord_account=discord_account,
        )
        return bool(match)

    def active_subscription_count(self, discord_account):
        """Return how many subscriptions *discord_account* currently has."""
        return len(list(
            self.subscriptions.find(discord_account=discord_account)))

    def all_subscriptions(self):
        """Return every subscription row, globally."""
        return list(self.subscriptions.find())

    def subscriptions_by_user(self, discord_account):
        """Return all subscriptions belonging to one discord user."""
        return list(self.subscriptions.find(discord_account=discord_account))

    def subscriptions_by_player_account(self, player_account):
        """Return all subscriptions pointing at one player account."""
        return list(self.subscriptions.find(player_account=player_account))

    def registered_targets(self):
        """Account names of every subscribed-to player (duplicates kept)."""
        return [row["player_account"] for row in self.subscriptions.find()]
|
[
"mail@emreyilmaz.me"
] |
mail@emreyilmaz.me
|
5836243f7b145db5656f8e58b2df169ceefab64f
|
ab79f8297105a7d412303a8b33eaa25038f38c0b
|
/education/timetable/report/__init__.py
|
634e31b396b90a640640fde891364d7a03dcf01d
|
[] |
no_license
|
adahra/addons
|
41a23cbea1e35079f7a9864ade3c32851ee2fb09
|
c5a5678379649ccdf57a9d55b09b30436428b430
|
refs/heads/master
| 2022-06-17T21:22:22.306787
| 2020-05-15T10:51:14
| 2020-05-15T10:51:14
| 264,167,002
| 1
| 0
| null | 2020-05-15T10:39:26
| 2020-05-15T10:39:26
| null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2012 Serpent Consulting Services (<http://www.serpentcs.com>)
# Copyright (C) 2013-2014 Serpent Consulting Services (<http://www.serpentcs.com>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import timetable_info
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] |
prog1@381544ba-743e-41a5-bf0d-221725b9d5af
|
7a2d05ce4585126a339e5fe6678268ec288490f0
|
25b914aecd6b0cb49294fdc4f2efcfdf5803cc36
|
/homeassistant/components/smarttub/binary_sensor.py
|
f5af165525520438b3dbdab8a7cca6a8bc6deacf
|
[
"Apache-2.0"
] |
permissive
|
jason0x43/home-assistant
|
9114decaa8f7c2f1582f84e79dc06736b402b008
|
8bf6aba1cf44ee841de063755c935ea78040f399
|
refs/heads/dev
| 2023-03-04T01:14:10.257593
| 2022-01-01T12:11:56
| 2022-01-01T12:11:56
| 230,622,861
| 1
| 1
|
Apache-2.0
| 2023-02-22T06:15:07
| 2019-12-28T14:45:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,737
|
py
|
"""Platform for binary sensor integration."""
from __future__ import annotations
from smarttub import SpaError, SpaReminder
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.helpers import entity_platform
from .const import ATTR_ERRORS, ATTR_REMINDERS, DOMAIN, SMARTTUB_CONTROLLER
from .entity import SmartTubEntity, SmartTubSensorBase
# whether the reminder has been snoozed (bool)
ATTR_REMINDER_SNOOZED = "snoozed"
ATTR_ERROR_CODE = "error_code"
ATTR_ERROR_TITLE = "error_title"
ATTR_ERROR_DESCRIPTION = "error_description"
ATTR_ERROR_TYPE = "error_type"
ATTR_CREATED_AT = "created_at"
ATTR_UPDATED_AT = "updated_at"
# how many days to snooze the reminder for
ATTR_REMINDER_DAYS = "days"
RESET_REMINDER_SCHEMA = {
vol.Required(ATTR_REMINDER_DAYS): vol.All(
vol.Coerce(int), vol.Range(min=30, max=365)
)
}
SNOOZE_REMINDER_SCHEMA = {
vol.Required(ATTR_REMINDER_DAYS): vol.All(
vol.Coerce(int), vol.Range(min=10, max=120)
)
}
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up binary sensor entities for the binary sensors in the tub."""
    controller = hass.data[DOMAIN][entry.entry_id][SMARTTUB_CONTROLLER]
    coordinator = controller.coordinator

    # One online + one error sensor per spa, plus one sensor per reminder.
    entities = []
    for spa in controller.spas:
        entities.append(SmartTubOnline(coordinator, spa))
        entities.append(SmartTubError(coordinator, spa))
        for reminder in coordinator.data[spa.id][ATTR_REMINDERS].values():
            entities.append(SmartTubReminder(coordinator, spa, reminder))
    async_add_entities(entities)

    # Expose the snooze/reset reminder services on this platform.
    platform = entity_platform.current_platform.get()
    platform.async_register_entity_service(
        "snooze_reminder", SNOOZE_REMINDER_SCHEMA, "async_snooze"
    )
    platform.async_register_entity_service(
        "reset_reminder", RESET_REMINDER_SCHEMA, "async_reset"
    )
class SmartTubOnline(SmartTubSensorBase, BinarySensorEntity):
    """A binary sensor indicating whether the spa is currently online (connected to the cloud)."""

    _attr_device_class = BinarySensorDeviceClass.CONNECTIVITY

    def __init__(self, coordinator, spa):
        """Initialize the entity."""
        super().__init__(coordinator, spa, "Online", "online")

    @property
    def is_on(self) -> bool:
        """Return true if the binary sensor is on."""
        return self._state is True

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry.

        Connectivity flaps are noisy and rarely useful, so this sensor
        starts out disabled unless the user opts in.
        """
        return False
class SmartTubReminder(SmartTubEntity, BinarySensorEntity):
    """Reminders for maintenance actions."""

    _attr_device_class = BinarySensorDeviceClass.PROBLEM

    def __init__(self, coordinator, spa, reminder):
        """Initialize the entity."""
        super().__init__(
            coordinator,
            spa,
            f"{reminder.name.title()} Reminder",
        )
        self.reminder_id = reminder.id

    @property
    def unique_id(self):
        """Return a unique id for this sensor."""
        return f"{self.spa.id}-reminder-{self.reminder_id}"

    @property
    def reminder(self) -> SpaReminder:
        """Return the underlying SpaReminder object for this entity."""
        return self.coordinator.data[self.spa.id][ATTR_REMINDERS][self.reminder_id]

    @property
    def is_on(self) -> bool:
        """Return whether the specified maintenance action needs to be taken."""
        days_left = self.reminder.remaining_days
        return days_left == 0

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        reminder = self.reminder
        return {
            ATTR_REMINDER_SNOOZED: reminder.snoozed,
            ATTR_REMINDER_DAYS: reminder.remaining_days,
        }

    async def async_snooze(self, days):
        """Snooze this reminder for the specified number of days."""
        await self.reminder.snooze(days)
        await self.coordinator.async_request_refresh()

    async def async_reset(self, days):
        """Dismiss this reminder, and reset it to the specified number of days."""
        await self.reminder.reset(days)
        await self.coordinator.async_request_refresh()
class SmartTubError(SmartTubEntity, BinarySensorEntity):
    """Indicates whether an error code is present.

    There may be 0 or more errors. If there are >0, we show the first one.
    """

    _attr_device_class = BinarySensorDeviceClass.PROBLEM

    def __init__(self, coordinator, spa):
        """Initialize the entity."""
        super().__init__(
            coordinator,
            spa,
            "Error",
        )

    @property
    def error(self) -> SpaError | None:
        """Return the underlying SpaError object for this entity."""
        errors = self.coordinator.data[self.spa.id][ATTR_ERRORS]
        return errors[0] if errors else None

    @property
    def is_on(self) -> bool:
        """Return true if an error is signaled."""
        return self.error is not None

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        error = self.error
        if error is None:
            return {}
        return {
            ATTR_ERROR_CODE: error.code,
            ATTR_ERROR_TITLE: error.title,
            ATTR_ERROR_DESCRIPTION: error.description,
            ATTR_ERROR_TYPE: error.error_type,
            ATTR_CREATED_AT: error.created_at.isoformat(),
            ATTR_UPDATED_AT: error.updated_at.isoformat(),
        }
|
[
"noreply@github.com"
] |
jason0x43.noreply@github.com
|
843108678682e8392270e75b6a3dcf8f91e7a60d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02686/s152409115.py
|
5134590d61085ac150e14818c0e30a5374b13fcf
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# Read N bracket strings and decide whether some ordering of them can be
# concatenated into one balanced bracket sequence ("Yes"/"No").
import sys
N = int(sys.stdin.readline().strip())
S = []
for _ in range(N):
    s_i = sys.stdin.readline().strip()
    S.append(s_i)
# Score "(" as +1 and ")" as -1.  Each string is summarized by the pair
# (minimum prefix height, final height), e.g.:
#   ")":          (-1, -1)
#   "(":          ( 1,  1)
#   ")()(":       (-1,  0)
#   "))))(((((":  (-4,  1)
plus_seqs = []
minus_seqs = []
for s_i in S:
    h = 0
    min_h = float("inf")
    for bracket in s_i:
        if bracket == "(":
            h += 1
        else:
            h -= 1
        min_h = min(min_h, h)
    if h >= 0:
        plus_seqs.append((min_h, h))
    else:
        # Net-falling strings are mirrored: (min_h - h, -h) summarizes the
        # string as seen from its right end, so the same greedy feasibility
        # check can be run from the back of the final sequence.
        minus_seqs.append((min_h - h, -1 * h))
# Greedy: place net-rising strings with the largest minimum first; if the
# running height would ever dip below zero, no ordering can work.
hight = 0
for (min_h, h) in sorted(plus_seqs, reverse=True):
    if hight + min_h < 0:
        print("No")
        sys.exit()
    hight += h
# Same greedy check for the mirrored net-falling strings.
hight2 = 0
for (min_h, h) in sorted(minus_seqs, reverse=True):
    if hight2 + min_h < 0:
        print("No")
        sys.exit()
    hight2 += h
# Total rise must match total fall for the whole sequence to balance.
if hight == hight2:
    print("Yes")
else:
    print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
463be4007737de91d1de827ce584c2849f45f000
|
20c75b34256a9e2c6d1ac18ac14e923778846660
|
/Interview_Preperation_Kit/WarmUpChallenges/CountingValleys.py
|
d33e153fa0e6134089ebbf856bb74ce0af0de2ab
|
[] |
no_license
|
ktyagi12/HackerRank
|
124303551dfe5b231654b5e96644ac43a775e31d
|
d10fbf50bc549297492618bb1896eca2e0cf3184
|
refs/heads/master
| 2020-09-06T19:33:27.422287
| 2020-01-11T12:02:36
| 2020-01-11T12:02:36
| 220,526,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
#Problem available at: https://www.hackerrank.com/challenges/counting-valleys/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=warmup
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countingValleys function below.
def countingValleys(n, s):
    """Count the valleys in a hike of n steps.

    A valley is a maximal stretch spent below sea level; it is counted at
    the moment the hiker climbs back up to sea level (altitude 0).

    s: string of 'U' (up) and 'D' (down) steps; only the first n are used.
    """
    altitude = 0
    valleys = 0
    for move in s[:n]:
        if move == 'U':
            altitude += 1
            # Reaching sea level on an upward step closes a valley.
            if altitude == 0:
                valleys += 1
        elif move == 'D':
            altitude -= 1
    return valleys
if __name__ == '__main__':
    # HackerRank harness: the result is written to the file whose path is
    # supplied via the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    s = input()
    result = countingValleys(n, s)
    fptr.write(str(result) + '\n')
    fptr.close()
|
[
"karishmatyagi12@gmail.com"
] |
karishmatyagi12@gmail.com
|
d1291af4db5155e59a61c437de7fcdb6c98f9866
|
841ad26ec31cd4339792c46513109d76c58161e9
|
/aula05/exercicio 8.py
|
f4c1ded6d79d92464638baf72349f0a97f01db8d
|
[] |
no_license
|
Elvis-Lopes/Ci-ncias-de-Dados-Uni9
|
ab5537bfc0f570d639e9763bb80b9654838e76d2
|
2939216c6adef7c64c8a7045b99c117753baaae8
|
refs/heads/master
| 2021-02-11T12:54:56.248564
| 2020-03-30T23:24:03
| 2020-03-30T23:24:03
| 244,492,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
# Read numbers until the user enters 0, collecting every value entered
# (including the terminating 0) and printing the resulting list.
listaNumerica = []
n = float(input('Digite um numero: '))
listaNumerica.append(n)
while n != 0:
    n = float(input('Digite um numero: '))
    listaNumerica.append(n)
print(listaNumerica)
|
[
"elvislopes1996@hotmail.com"
] |
elvislopes1996@hotmail.com
|
a9e0812a200d12dea201acea2f097974ca462be5
|
b5e9349b073d90ee1188e3fc0f844eaefff68640
|
/travello/views.py
|
28028cf0fe7217f501e771ab2c103086a692ead9
|
[] |
no_license
|
tamanna090903/travello
|
7e33162c450dd6bf8429036e40a5b631a14084cc
|
6d207597ade49354516bc09fa2e5e77624f3a8f3
|
refs/heads/master
| 2020-06-24T17:32:37.622240
| 2019-07-28T16:52:37
| 2019-07-28T16:52:37
| 199,031,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
from django.shortcuts import render
from .models import Destination
# Create your views here.
def index(request):
    """Render the home page with every Destination record."""
    all_destinations = Destination.objects.all()
    context = {'dests': all_destinations}
    return render(request, "index.html", context)
|
[
"hizbul.ku@gmail.com"
] |
hizbul.ku@gmail.com
|
afa043d0d46daf0c393a951b77ce58cfe19f86d3
|
215fa1a675e15117f6579a96974e187952f0a0b1
|
/gevent/thread.py
|
8de1c6b35ad1f42f5977144ae6beff0d154c30ac
|
[
"MIT"
] |
permissive
|
easel/gevent
|
1d04b36deb871a2cc4578f3d533de0205abf2ccd
|
dcb431e55037192a0461ef8067d8f087a3e084d7
|
refs/heads/master
| 2021-01-01T18:33:59.774821
| 2012-06-06T15:07:40
| 2012-06-06T15:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
"""Implementation of the standard :mod:`thread` module that spawns greenlets.
.. note::
This module is a helper for :mod:`gevent.monkey` and is not intended to be
used directly. For spawning greenlets in your applications, prefer
:class:`Greenlet` class.
"""
import sys
__implements__ = ['allocate_lock',
'get_ident',
'exit',
'LockType',
'stack_size',
'start_new_thread',
'_local']
__imports__ = ['error']
if sys.version_info[0] <= 2:
__target__ = 'thread'
else:
__target__ = '_thread'
__thread__ = __import__(__target__)
error = __thread__.error
from gevent.hub import getcurrent, GreenletExit
from gevent.greenlet import Greenlet
from gevent.lock import Semaphore as LockType
from gevent.local import local as _local
def get_ident(gr=None):
    """Return an identifier for *gr*, or for the current greenlet.

    Greenlets are identified by their object id, mirroring the integer
    identifiers handed out by the stdlib thread module.
    """
    target = getcurrent() if gr is None else gr
    return id(target)
def start_new_thread(function, args=(), kwargs={}):
    """Spawn *function* in a new greenlet and return the greenlet's identifier."""
    child = Greenlet.spawn(function, *args, **kwargs)
    return get_ident(child)
def allocate_lock():
    """Return a new lock object (a one-slot Semaphore, see LockType)."""
    return LockType(1)
def exit():
    """Greenlet analogue of thread.exit(): raise GreenletExit to terminate
    the calling greenlet."""
    raise GreenletExit
# stack_size is optional in the underlying thread module; wrap it only when
# present, otherwise drop it from the names this module claims to implement.
if hasattr(__thread__, 'stack_size'):
    _original_stack_size = __thread__.stack_size
    def stack_size(size=None):
        # With no argument, report the current stack size; otherwise only
        # ever *increase* it (see comment below).
        if size is None:
            return _original_stack_size()
        if size > _original_stack_size():
            return _original_stack_size(size)
        else:
            pass
            # not going to decrease stack_size, because otherwise other greenlets in this thread will suffer
else:
    __implements__.remove('stack_size')
# Public surface: our reimplementations plus names re-exported verbatim.
__all__ = __implements__ + __imports__
# _local is implemented (for the monkey patcher) but deliberately kept out
# of the public __all__ list.
__all__.remove('_local')
# XXX interrupt_main
|
[
"denis.bilenko@gmail.com"
] |
denis.bilenko@gmail.com
|
b0565a046adfc997bf8ea3559143f82649e12133
|
91da8a59561d6f2c7852c0548298434e0ede2ac7
|
/Hash table/MaxNumberofK-SumPairs.py
|
4a0bfbbcd318ab64c3250a8770c8878e0d3af028
|
[] |
no_license
|
prashant97sikarwar/leetcode
|
6d3828772cc426ccf53dad07edb1efbc2f1e1ded
|
e76054e27a5d4493bd1bcef2ebdeb21d257afb63
|
refs/heads/master
| 2023-08-23T05:06:23.181869
| 2021-10-28T18:19:10
| 2021-10-28T18:19:10
| 286,057,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
#Problem Link:- https://leetcode.com/problems/max-number-of-k-sum-pairs/
"""You are given an integer array nums and an integer k. In one operation, you can pick two
numbers from the array whose sum equals k and remove them from the array.Return the maximum
number of operations you can perform on the array."""
class Solution(object):
    def maxOperations(self, nums, k):
        """Return the max number of disjoint pairs in nums that sum to k.

        Single pass keeping a dict of values still waiting for a partner:
        when the complement of the current value is waiting, form a pair
        and consume one occurrence of it; otherwise enqueue the value.
        """
        waiting = dict()
        pairs = 0
        for value in nums:
            partner = k - value
            if partner in waiting:
                pairs += 1
                waiting[partner] -= 1
                # Drop exhausted entries so membership tests stay correct.
                if waiting[partner] == 0:
                    del waiting[partner]
            else:
                waiting[value] = waiting.get(value, 0) + 1
        return pairs
|
[
"prashant97sikarwar@gmail.com"
] |
prashant97sikarwar@gmail.com
|
1e52ec2ede49f6add05f994482b8aeb958a08cfc
|
49185bd5cf7e2f5190ce22b5189a09fe1ab6bb0f
|
/Proper/proper/examples/simple_prescription.py
|
cf7b181641c3e6576ad699572b6fa0f84fe0d83c
|
[
"MIT"
] |
permissive
|
RupertDodkins/MEDIS
|
c3f55d8adb6a8c4120593ba6552c9dfe3784d4e2
|
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
|
refs/heads/master
| 2021-07-05T20:06:44.162517
| 2019-09-05T22:16:12
| 2019-09-05T22:16:12
| 160,850,558
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import proper
def simple_prescription(wavelength, gridsize):
    """Minimal PROPER prescription: circular aperture focused by one lens.

    Returns the (wavefront, sampling) pair produced by proper.prop_end.
    """
    # Entrance aperture and focal geometry.
    aperture_diameter = 1.0
    f_ratio = 15.0
    efl = aperture_diameter * f_ratio  # effective focal length
    beam_width_ratio = 0.5

    # Initialize the wavefront, stop it down, and mark the entrance pupil.
    wavefront = proper.prop_begin(aperture_diameter, wavelength, gridsize,
                                  beam_width_ratio)
    proper.prop_circular_aperture(wavefront, aperture_diameter / 2)
    proper.prop_define_entrance(wavefront)

    # Focus with a single lens and propagate to its focal plane.
    proper.prop_lens(wavefront, efl)
    proper.prop_propagate(wavefront, efl)

    (wavefront, sampling) = proper.prop_end(wavefront)
    return (wavefront, sampling)
|
[
"rupertdodkins@gmail.com"
] |
rupertdodkins@gmail.com
|
4c68017ce4aae30013d89c3c9a04d30934043953
|
cbc5e26bb47ae69e80a3649c90275becf25ce404
|
/xlsxwriter/test/comparison/test_chart_errorbars10.py
|
bc261b91a0893ee6210674e79ef3870005290cca
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] |
permissive
|
mst-solar-car/kicad-bom-generator
|
c3549409c3139f787ad28391372b5cb03791694a
|
2aae905056d06f3d25343a8d784049c141d05640
|
refs/heads/master
| 2021-09-07T14:00:40.759486
| 2018-02-23T23:21:13
| 2018-02-23T23:21:13
| 107,868,801
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_errorbars10.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with error bars."""
        wb = Workbook(self.got_filename)
        ws = wb.add_worksheet()

        chart = wb.add_chart({'type': 'line'})
        chart.axis_ids = [69198976, 69200896]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        # Write each data column to its own worksheet column.
        for cell, column in zip(('A1', 'B1', 'C1'), data):
            ws.write_column(cell, column)

        # First series carries custom y error bars; second has none.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'y_error_bars': {
                'type': 'custom',
                'plus_values': '=Sheet1!$A$1',
                'minus_values': '=Sheet1!$B$1:$B$3',
                'plus_data': [1],
                'minus_data': [2, 4, 6],
            },
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        ws.insert_chart('E9', chart)
        wb.close()

        self.assertExcelEqual()
|
[
"mwrb7d@mst.edu"
] |
mwrb7d@mst.edu
|
480048ab14fa77b38474b844721b38de1a29f589
|
c287efc62bf76323f99f0e8e8460c67123bbe9c4
|
/getdeal/apps/profiles/permissions.py
|
9ce3f833d2f2df58d124cacdd717e675d404c122
|
[] |
no_license
|
PankeshGupta/getdeal
|
ff702e1ab629a06bc6d7ad012c55bc0b0e0c1415
|
b0702a8f7f60de6db9de7f712108e68d66f07f61
|
refs/heads/master
| 2020-03-18T14:15:37.874570
| 2015-02-23T00:12:58
| 2015-02-23T00:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sep 22, 2013
"""
from rest_framework import permissions
class IsUserOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        # Reads are always allowed; writes require that the object *is*
        # the requesting user.
        return (request.method in permissions.SAFE_METHODS
                or obj == request.user)
class IsProfileOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        # Reads are always allowed; writes require that the profile's
        # user matches the requesting user.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.user == request.user
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
e4cd3f418288f140c96540175a456d68bc217d6b
|
5a281cb78335e06c631181720546f6876005d4e5
|
/karbor-1.3.0/karbor/services/protection/protectable_plugins/share.py
|
60a4bc2f57d26d0747b3f8b2cc01936164dbd252
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504
| 2020-03-15T01:31:10
| 2020-03-15T01:31:10
| 247,380,811
| 0
| 0
|
Apache-2.0
| 2020-03-15T01:24:15
| 2020-03-15T01:24:15
| null |
UTF-8
|
Python
| false
| false
| 3,812
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from karbor.common import constants
from karbor import exception
from karbor import resource
from karbor.services.protection.client_factory import ClientFactory
from karbor.services.protection import protectable_plugin
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
INVALID_SHARE_STATUS = ['deleting', 'deleted', 'error', 'error_deleting',
'manage_error', 'unmanage_error', 'extending_error',
'shrinking_error', 'reverting_error']
class ShareProtectablePlugin(protectable_plugin.ProtectablePlugin):
    """Manila share protectable plugin"""
    # Resource type this plugin handles: a manila share.
    _SUPPORT_RESOURCE_TYPE = constants.SHARE_RESOURCE_TYPE
    def _client(self, context):
        # NOTE(review): a fresh manila client is created on every call — the
        # stored attribute is overwritten, never reused — presumably so the
        # client always reflects the caller's context; confirm before caching.
        self._client_instance = ClientFactory.create_client(
            "manila",
            context)
        return self._client_instance
    def get_resource_type(self):
        """Return the resource type supported by this plugin."""
        return self._SUPPORT_RESOURCE_TYPE
    def get_parent_resource_types(self):
        """Shares are enumerated per project."""
        return (constants.PROJECT_RESOURCE_TYPE, )
    def list_resources(self, context, parameters=None):
        """List every share in a usable state as a karbor Resource.

        Raises ListProtectableResourceFailed if the manila call fails.
        """
        try:
            shares = self._client(context).shares.list(detailed=True)
        except Exception as e:
            LOG.exception("List all summary shares from manila failed.")
            raise exception.ListProtectableResourceFailed(
                type=self._SUPPORT_RESOURCE_TYPE,
                reason=six.text_type(e))
        else:
            # Shares in transient/error states are not protectable.
            return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
                                      id=share.id, name=share.name)
                    for share in shares
                    if share.status not in INVALID_SHARE_STATUS]
    def show_resource(self, context, resource_id, parameters=None):
        """Return one share as a Resource.

        Raises ProtectableResourceNotFound if the lookup fails, and
        ProtectableResourceInvalidStatus if the share is in an
        unprotectable state.
        """
        try:
            share = self._client(context).shares.get(resource_id)
        except Exception as e:
            LOG.exception("Show a summary share from manila failed.")
            raise exception.ProtectableResourceNotFound(
                id=resource_id,
                type=self._SUPPORT_RESOURCE_TYPE,
                reason=six.text_type(e))
        else:
            if share.status in INVALID_SHARE_STATUS:
                raise exception.ProtectableResourceInvalidStatus(
                    id=resource_id, type=self._SUPPORT_RESOURCE_TYPE,
                    status=share.status)
            return resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
                                     id=share.id, name=share.name)
    def get_dependent_resources(self, context, parent_resource):
        """List the usable shares belonging to parent_resource (a project).

        Raises ListProtectableResourceFailed if the manila call fails.
        """
        try:
            shares = self._client(context).shares.list()
        except Exception as e:
            LOG.exception("List all shares from manila failed.")
            raise exception.ListProtectableResourceFailed(
                type=self._SUPPORT_RESOURCE_TYPE,
                reason=six.text_type(e))
        else:
            # Keep only shares owned by the given project and in a good state.
            return [resource.Resource(type=self._SUPPORT_RESOURCE_TYPE,
                                      id=share.id,
                                      name=share.name)
                    for share in shares
                    if share.project_id == parent_resource.id
                    and share.status not in INVALID_SHARE_STATUS]
|
[
"Wayne Gong@minbgong-winvm.cisco.com"
] |
Wayne Gong@minbgong-winvm.cisco.com
|
b25410fbc35275f299a22b2b9d4a9530a7d3c99f
|
127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06
|
/2021_하반기 코테연습/leet_451.py
|
c8f3066418b4e33e2c805fefef2f434513deadaf
|
[] |
no_license
|
holim0/Algo_Study
|
54a6f10239368c6cf230b9f1273fe42caa97401c
|
ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c
|
refs/heads/master
| 2023-08-25T14:07:56.420288
| 2021-10-25T12:28:23
| 2021-10-25T12:28:23
| 276,076,057
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
from collections import defaultdict
class Solution:
    def frequencySort(self, s: str) -> str:
        """Rearrange s so characters appear in decreasing frequency.

        Ties keep first-appearance order: the sort key negates the count
        so the (stable) ascending sort yields descending frequency without
        reversing, preserving insertion order among equal counts.
        """
        counts = defaultdict(int)
        for ch in s:
            counts[ch] += 1
        by_frequency = sorted(counts.items(), key=lambda kv: -kv[1])
        return "".join(ch * cnt for ch, cnt in by_frequency)
|
[
"holim1226@gmail.com"
] |
holim1226@gmail.com
|
f88a8f94c2dd7cf204eba1fa2b522da44f2431ef
|
dde00571d8e65208c0642f009cb1d4bc33460026
|
/bigmler/retrain/dispatcher.py
|
8cc577939895b4f3f5a583c63afcd8068c1d2c27
|
[
"Apache-2.0"
] |
permissive
|
javs0188/bigmler
|
44e5505f4751ebdfece7da87e4d4592b0da7ff51
|
e411bb292a3c8db4cac6754b2b744ffe27fdb47a
|
refs/heads/master
| 2021-03-01T02:08:29.730986
| 2020-01-25T10:43:01
| 2020-01-25T10:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,659
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer retrain main processing
Functions to retrain a modeling resource
"""
from __future__ import absolute_import
import sys
import os
import shutil
import bigmler.processing.args as a
import bigmler.utils as u
from bigmler.defaults import DEFAULTS_FILE
from bigmler.retrain.retrain import retrain_model
from bigmler.dispatcher import (SESSIONS_LOG,
clear_log_files)
from bigmler.command import get_context
COMMAND_LOG = u".bigmler_retrain"
DIRS_LOG = u".bigmler_retrain_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
DEFAULT_OUTPUT = "retrain_script"
SETTINGS = {
"command_log": COMMAND_LOG,
"sessions_log": SESSIONS_LOG,
"dirs_log": DIRS_LOG,
"default_output": DEFAULT_OUTPUT,
"defaults_file": DEFAULTS_FILE}
def check_compulsory_options(flags, args):
    """Return True when a resource id or at least one *-tag flag is given.

    Retraining needs either an explicit id (args.resource_id) or a
    tag-style flag (--model-tag, --ensemble-tag, ...) to locate the
    resource.
    """
    has_tag_flag = any(flag.endswith("-tag") for flag in flags)
    return args.resource_id is not None or has_tag_flag
def retrain_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer retrain

    Parses the command line, builds the execution context, and either
    retrains the referenced resource or exits with a usage message.
    NOTE: the default for args is captured once at import time; pass args
    explicitly when calling programmatically.
    """
    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)
    # parses the command line to get the context args and the log files to use
    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)
    # --id or --model-tag, --ensemble-tag, etc. is compulsory
    if check_compulsory_options(command.flags, command_args):
        retrain_model(command_args, api, command,
                      session_file=session_file)
        # Separator line marks the end of the session in the log.
        u.log_message("_" * 80 + "\n", log_file=session_file)
    else:
        sys.exit("You must provide the ID of the resource to be"
                 " retrained in the --id option or a unique tag"
                 " to retrieve such ID."
                 " Type bigmler retrain --help\n"
                 " to see all the available options.")
|
[
"merce@bigml.com"
] |
merce@bigml.com
|
ff41520a1318a531ff4c623d7d94c949f05421e2
|
d2b53b3568890dd805575035d09635c422c6bc4d
|
/python/ray/tests/aws/test_autoscaler_aws.py
|
b8ad6f31e04390f923b33d399f671f73469a7377
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
mehrdadn/ray
|
939deda7099eb30371cbb920a9725b314c58c0b5
|
3506910c5da257215d38d02f424acc4f419ddbaf
|
refs/heads/master
| 2020-09-03T15:33:35.578248
| 2020-07-31T21:33:27
| 2020-07-31T21:33:27
| 219,498,150
| 2
| 1
|
Apache-2.0
| 2019-11-04T12:37:23
| 2019-11-04T12:37:22
| null |
UTF-8
|
Python
| false
| false
| 3,393
|
py
|
import pytest
import ray.tests.aws.utils.stubs as stubs
import ray.tests.aws.utils.helpers as helpers
from ray.tests.aws.utils.constants import AUX_SUBNET, DEFAULT_SUBNET, \
DEFAULT_SG_AUX_SUBNET, DEFAULT_SG, DEFAULT_SG_DUAL_GROUP_RULES, \
DEFAULT_SG_WITH_RULES_AUX_SUBNET, DEFAULT_SG_WITH_RULES, AUX_SG
def test_create_sg_different_vpc_same_rules(iam_client_stub, ec2_client_stub):
    """Bootstrapping a config with head and worker nodes on different
    subnets must create one security group per VPC with the same rules.

    All AWS calls are satisfied by botocore stubs; the order of the
    stubbed responses below mirrors the exact call order of the bootstrap
    code, so reordering them breaks the test.
    """
    # use default stubs to skip ahead to security group configuration
    stubs.skip_to_configure_sg(ec2_client_stub, iam_client_stub)
    # given head and worker nodes with custom subnets defined...
    # expect to first describe the worker subnet ID
    stubs.describe_subnets_echo(ec2_client_stub, AUX_SUBNET)
    # expect to second describe the head subnet ID
    stubs.describe_subnets_echo(ec2_client_stub, DEFAULT_SUBNET)
    # given no existing security groups within the VPC...
    stubs.describe_no_security_groups(ec2_client_stub)
    # expect to first create a security group on the worker node VPC
    stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG_AUX_SUBNET)
    # expect new worker security group details to be retrieved after creation
    stubs.describe_sgs_on_vpc(
        ec2_client_stub,
        [AUX_SUBNET["VpcId"]],
        [DEFAULT_SG_AUX_SUBNET],
    )
    # expect to second create a security group on the head node VPC
    stubs.create_sg_echo(ec2_client_stub, DEFAULT_SG)
    # expect new head security group details to be retrieved after creation
    stubs.describe_sgs_on_vpc(
        ec2_client_stub,
        [DEFAULT_SUBNET["VpcId"]],
        [DEFAULT_SG],
    )
    # given no existing default head security group inbound rules...
    # expect to authorize all default head inbound rules
    stubs.authorize_sg_ingress(
        ec2_client_stub,
        DEFAULT_SG_DUAL_GROUP_RULES,
    )
    # given no existing default worker security group inbound rules...
    # expect to authorize all default worker inbound rules
    stubs.authorize_sg_ingress(
        ec2_client_stub,
        DEFAULT_SG_WITH_RULES_AUX_SUBNET,
    )
    # given the prior modification to the head security group...
    # expect the next read of a head security group property to reload it
    stubs.describe_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_RULES)
    # given the prior modification to the worker security group...
    # expect the next read of a worker security group property to reload it
    stubs.describe_sg_echo(ec2_client_stub, DEFAULT_SG_WITH_RULES_AUX_SUBNET)
    # given our mocks and an example config file as input...
    # expect the config to be loaded, validated, and bootstrapped successfully
    config = helpers.bootstrap_aws_example_config_file("example-subnets.yaml")
    # expect the bootstrapped config to show different head and worker security
    # groups residing on different subnets
    assert config["head_node"]["SecurityGroupIds"] == [DEFAULT_SG["GroupId"]]
    assert config["head_node"]["SubnetIds"] == [DEFAULT_SUBNET["SubnetId"]]
    assert config["worker_nodes"]["SecurityGroupIds"] == [AUX_SG["GroupId"]]
    assert config["worker_nodes"]["SubnetIds"] == [AUX_SUBNET["SubnetId"]]
    # expect no pending responses left in IAM or EC2 client stub queues
    iam_client_stub.assert_no_pending_responses()
    ec2_client_stub.assert_no_pending_responses()
if __name__ == "__main__":
    # Allow running this test module directly, outside the pytest runner.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
[
"noreply@github.com"
] |
mehrdadn.noreply@github.com
|
00006783d5e79988872b0772507bea6d8a61f0db
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/251/36331/submittedfiles/testes.py
|
2d065c88525ae84222b04ffffd65216933bdb1e3
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Simulate 10 years of compound interest: each year the balance grows by
# the annual rate and the year-end total is printed with two decimals.
investimento = float (input('Digite o valor do investimento inicial: '))
taxa = float (input('Digite o valor da taxa anual (em decimais): '))
for i in range (1,10+1,1):
    # Fix: recompute the return every year.  The original computed renda
    # once before the loop, so the same value was printed 10 times.
    renda = (investimento+(investimento*taxa))
    print('%.2f'%renda)
    investimento=renda
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
56bb280379109927a2e04eac8c3e377a33c399a9
|
3f1edc1a3b9f27c365b9c851d92abba7b1401c02
|
/features_server/features_server.py
|
f780b5f6e54294a687e62896c08f1e7172801396
|
[] |
no_license
|
nakamoo/sumica
|
278e137c8ac79f8d7d6743093b81e1466a89e26e
|
302c72f283edc1f7953e224faf31d3f96bfe73e8
|
refs/heads/master
| 2021-03-22T02:15:42.215137
| 2018-02-13T13:22:55
| 2018-02-13T13:22:55
| 94,852,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,934
|
py
|
import flask
import scipy.misc
import sys
import uuid
from flask import Flask, request
app = Flask(__name__)
import re
import os
import json
from skimage import io
import numpy as np
from io import BytesIO
import cv2
import PyOpenPose as OP
from concurrent.futures import ThreadPoolExecutor, wait
def init_pose():
    """Create the OpenPose wrapper used for body/face/hand keypoints.

    Net input/output sizes are hard-coded for a 1280x720 camera feed and
    the COCO model; the model path is machine-specific (TODO: make it
    configurable).
    """
    op = OP.OpenPose((656, 368), (368, 368), (1280, 720), "COCO", "/home/sean/openpose/models/", 0, False,
                     OP.OpenPose.ScaleMode.ZeroToOne, True, True)
    return op
# GPU conflict somehow goes away when using threads
pose_executor = ThreadPoolExecutor(1)
future = pose_executor.submit(init_pose)
wait([future])
op = future.result()
import detection_nn
from i3dnn import I3DNN
i3d = I3DNN("2")
datafiles_root = "../main_server/sumica/datafiles"
def format_image(image):
    """Normalize an image array to a 3-channel (H, W, 3) layout.

    Grayscale and single-channel inputs are replicated across 3 channels;
    inputs with more than 3 channels (e.g. RGBA) are truncated to the
    first 3.  Already-3-channel images pass through unchanged.
    """
    if image.ndim == 2:  # grayscale -> 3 channels
        image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
    elif image.shape[2] > 3:  # 4-channel -> 3-channels
        image = image[:, :, :3]
    elif image.shape[2] == 1:  # single-channel -> 3-channels
        image = np.repeat(image, 3, axis=2)
    return image
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
def preprocess(imgmat):
    """Boost local contrast by applying CLAHE to the luma channel.

    Converts RGB -> YUV, equalizes the Y plane with the module-level
    CLAHE object, then converts back to RGB.
    """
    yuv = cv2.cvtColor(imgmat, cv2.COLOR_RGB2YUV)
    yuv[:, :, 0] = clahe.apply(yuv[:, :, 0])
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)
def nms(dets, iou_threshold=0.5):
    """Non-maximum suppression over detection dicts.

    dets: list of {"label", "confidence", "box"} dicts.
    Returns the detections that survive suppression: for each label, any
    box overlapping an already-kept box with IoU > iou_threshold is
    dropped.

    Fix: the original computed an (ascending) confidence sort but then
    iterated the *unsorted* input, so a low-confidence box could suppress
    a higher-confidence one.  Detections are now visited from highest to
    lowest confidence, which is the standard NMS order.
    """
    ordered = sorted(dets, key=lambda d: d["confidence"], reverse=True)
    kept = []
    for det in ordered:
        suppressed = any(
            b["label"] == det["label"]
            and iou(b["box"], det["box"]) > iou_threshold
            for b in kept
        )
        if not suppressed:
            kept.append(det)
    return kept
def iou(boxA, boxB):
    """Return the intersection-over-union of two [x1, y1, x2, y2] boxes.

    Uses the inclusive-pixel convention (+1 on each extent).

    Fix: disjoint boxes previously produced a *positive* intersection
    area (the product of two negative extents), yielding a bogus IoU;
    extents are now clamped at zero so non-overlapping boxes return 0.
    """
    # Corners of the intersection rectangle.
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # Compute the area of intersection, clamped to zero when the boxes
    # do not overlap.
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # Areas of the prediction and ground-truth rectangles.
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # IoU = intersection / union (union = sum of areas - intersection).
    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou
def pose_estimation(img):
    """Run OpenPose on img and return body/hand/face keypoints as lists.

    Missing detections are returned as empty lists rather than None.
    """
    # Detection calls populate the wrapper's internal state.
    op.detectPose(img)
    op.detectFace(img)
    op.detectHands(img)
    keypoints = {
        'body': op.getKeypoints(op.KeypointType.POSE)[0],
        'hand': op.getKeypoints(op.KeypointType.HAND)[0],
        'face': op.getKeypoints(op.KeypointType.FACE)[0],
    }
    results = {'body': [], 'hand': [], 'face': []}
    for name, pts in keypoints.items():
        if pts is not None:
            results[name] = pts.tolist()
    return results
def object_detection(imgmat, query):
    """Run the detection network on `imgmat`, driven by request parameters.

    Recognized `query` keys (all string-valued, from a form dict):
      detection_threshold  -- confidence cutoff (default 0.3)
      get_image_features   -- "true" to save whole-image features (default off)
      get_object_features  -- "true" to save per-object features (default off)
      get_object_detections-- "true"/absent to return detections (default on)
      nms_threshold        -- if present, apply nms() with this IoU threshold

    Saved feature arrays go under `datafiles_root` with fresh UUID names;
    the returned dict holds the filenames and/or the detection list.
    """
    threshold = float(query.get("detection_threshold", 0.3))
    want_image_feats = query.get("get_image_features") == "true"
    want_object_feats = query.get("get_object_features") == "true"
    want_detections = query.get("get_object_detections", "true") == "true"
    # When only whole-image features are requested, RCNN can stop after the
    # region-proposal step.
    proposal_only = want_image_feats and not (want_object_feats or want_detections)
    image_feats, detections, object_feats = detection_nn.detect(imgmat, threshold, proposal_only)

    result = {}
    if want_image_feats:
        filename = str(uuid.uuid4()) + ".npy"
        # Collapse the spatial feature maps into a single vector.
        np.save(datafiles_root + "/image_features/" + filename,
                np.max(image_feats, axis=(0, 1)))
        result["image_features_filename"] = filename
    if want_object_feats:
        filename = str(uuid.uuid4()) + ".npy"
        np.save(datafiles_root + "/object_features/" + filename,
                np.array(object_feats))
        result["object_features_filename"] = filename
    if want_detections:
        if "nms_threshold" in query:
            result["detections"] = nms(detections, float(query["nms_threshold"]))
        else:
            result["detections"] = detections
    return result
def action_recognition(whole_img, data):
    """Classify the action of each detected person; annotates `data` in place.

    For every detection labelled "person", crops a square region around its
    box, feeds a repeated-frame "clip" to the I3D network and adds
    action_label / action_confidence / action_crop / action_vector to that
    detection dict.  Returns the (mutated) `data`.
    """
    for i in range(len(data["detections"])):
        if data["detections"][i]["label"] == "person":
            box = data["detections"][i]["box"]
            im_w, im_h = whole_img.shape[1], whole_img.shape[0]
            box_w, box_h = (box[2] - box[0]), (box[3] - box[1])
            # expand: square crop centered on the box, side = 2x the longer
            # box side, but never larger than the smaller image dimension
            cx, cy = (box[0] + box[2]) // 2, (box[1] + box[3]) // 2
            longer_side = max(box_w, box_h) * 2.0
            constrained_side = min(min(im_w, im_h), longer_side)
            a = constrained_side / 2.0
            x1, y1, x2, y2 = cx - a, cy - a, cx + a, cy + a
            # Shift the window back inside the image.  Left/top are clamped
            # first, then right/bottom, so the crop keeps its size whenever
            # the window fits at all.
            if x1 < 0:
                x2 -= x1
                x1 = 0
            if y1 < 0:
                y2 -= y1
                y1 = 0
            if x2 >= im_w:
                x1 -= x2 - im_w
                x2 = im_w
            if y2 >= im_h:
                y1 -= y2 - im_h
                y2 = im_h
            x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
            crop = whole_img[y1:y2, x1:x2, :]
            crop = cv2.resize(crop, (224, 224))
            # Fake a 10-frame clip by repeating the single frame:
            # shape (1, 10, 224, 224, 3) -- presumably what I3D expects;
            # TODO confirm against i3d.process_image.
            img = np.repeat(crop[None, None, :], 10, axis=1)
            prob, logits, label, feats = i3d.process_image(img)
            det = data["detections"][i]
            updates = {}
            updates["action_label"] = label
            updates["action_confidence"] = float(prob)
            updates["action_crop"] = [x1, y1, x2, y2]
            updates["action_vector"] = feats
            det.update(updates)
    return data
@app.route('/extract_features', methods=["POST"])
def extract_features():
    """HTTP endpoint: run the full vision pipeline on the image at form["path"].

    Runs object detection, pose estimation (on the dedicated executor) and
    action recognition, and returns the combined result as JSON.
    """
    params = request.form.to_dict()
    image = format_image(io.imread(params["path"]))
    result = object_detection(image, params)
    pose_future = pose_executor.submit(pose_estimation, image)
    wait([pose_future])
    result["pose"] = pose_future.result()
    result = action_recognition(image, result)
    return json.dumps(result)
if __name__ == "__main__":
    # threaded=False and use_reloader=False: the networks above are shared
    # module-level state (presumably not thread-safe -- TODO confirm) and
    # should only be loaded once per process.
    app.run(host='0.0.0.0', threaded=False, use_reloader=False, debug=False, port=5002)
|
[
"you@example.com"
] |
you@example.com
|
8b2e65993a3863ac9ac5c852480122cca60b8959
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/train/python/af6282605186f294d7c47dcace86864f4f872c6egenDb.py
|
af6282605186f294d7c47dcace86864f4f872c6e
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
from review.models import *
def run():
    """Seed the review app with demo users, friendships and product reviews."""
    people = [
        ("34", "joe"), ("35", "sam"), ("36", "john"), ("37", "jeff"),
        ("38", "tom"), ("39", "ravi"), ("40", "lucky"),
    ]
    nodes = []
    for fb_id, user_name in people:
        node = node_user(fb_id=fb_id, user_name=user_name)
        node.save()
        nodes.append(node)
    n1, n2, n3, n4, n5, n6, n7 = nodes

    # Friendships are stored as directed edges, one row per direction.
    friendships = [
        (n1, n2), (n2, n1),
        (n1, n3), (n3, n1),
        (n4, n1), (n1, n4),
        (n2, n5), (n5, n2),
    ]
    for left, right in friendships:
        edge_friend(node_user_1=left, node_user_2=right).save()

    # All reviews target the same demo product.
    demo_reviews = [
        (n2, "ABC", 2), (n3, "DEF", 3), (n4, "GHI", 4),
        (n5, "JKL", 8), (n6, "MNO", 6), (n7, "PQR", 9),
    ]
    for user, comment, rating in demo_reviews:
        reviews(product_id=1234, user_id=user, comment=comment, rating=rating).save()
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
b5391465dd3262d455b496d8d0456ca778bfd174
|
52ad58b5412f9124822283d168391e5e2b8fa150
|
/Linux/Linux命令介绍04.py
|
2a62916447e0b8dbefa96a704151ee2d5f9deddc
|
[] |
no_license
|
JiangHuYiXiao/PythonStudy
|
69ad9795faaf24a6166ab21cae564f6461e1363e
|
aeebce2cacbf3757d25c8c4d24d15639e0bb8e37
|
refs/heads/master
| 2021-08-17T16:54:43.477502
| 2021-08-11T01:06:11
| 2021-08-11T01:06:11
| 153,078,386
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,201
|
py
|
# -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2020/9/11 8:40
# @Software : Python_study
# @Python_verison : 3.7
'''
命令提示符
登录系统后,第一眼看到的内容是:
[root@localhost ~]#
这就是 Linux 系统的命令提示符。那么,这个提示符的含义是什么呢?
[]:这是提示符的分隔符号,没有特殊含义。
root:显示的是当前的登录用户,笔者现在使用的是 root 用户登录。
@:分隔符号,没有特殊含义。
localhost:当前系统的简写主机名(完整主机名是 localhost.localdomain)。
~:代表用户当前所在的目录,此例中用户当前所在的目录是家目录。
#:命令提示符,Linux 用这个符号标识登录的用户权限等级。如果是超级用户,提示符就是 #;如果是普通用户,提示符就是 $。
家目录(又称主目录)是什么? Linux 系统是纯字符界面,用户登录后,要有一个初始登录的位置,这个初始登录位置就称为用户的家:
超级用户的家目录:/root。
普通用户的家目录:/home/用户名。
用户在自己的家目录中拥有完整权限,所以我们也建议操作实验可以放在家目录中进行。我们切换一下用户所在目录,看看有什么效果。
[root@localhost ~]# cd /usr/local
[root@localhost local]#
仔细看,如果切换用户所在目录,那么命令提示符中的会变成用户当前所在目录的最后一个目录(不显示完整的所在目录 /usr/ local,只显示最后一个目录 local)。
命令的基本格式
接下来看看 Linux 命令的基本格式:
[root@localhost ~]# 命令[选项][参数]
命令格式中的 [] 代表可选项,也就是有些命令可以不写选项或参数,也能执行。那么,我们就用 Linux 中最常见的 ls 命令来解释一下命令的格式(有关 ls 命令的具体用法,后续章节会详细介绍)。如果按照命令的分类,那么 ls 命令应该属于目录操作命令。
[root@localhost ~]# ls
anaconda-ks.cfg install.log install.log.syslog
1) 选项的作用
ls 命令之后不加选项和参数也能执行,不过只能执行最基本的功能,即显示当前目录下的文件名。那么加入一个选项,会出现什么结果?
[root@localhost ~]# Is -l
总用量44
-rw-------.1 root root 1207 1 月 14 18:18 anaconda-ks.cfg
-rw-r--r--.1 root root 24772 1 月 14 18:17 install.log
-rw-r--r--.1 root root 7690 1 月 14 18:17 install.log.syslog
如果加一个"-l"选项,则可以看到显示的内容明显增多了。"-l"是长格式(long list)的意思,也就是显示文件的详细信息。至于 "-l" 选项的具体含义,我们稍后再详细讲解。可以看到选项的作用是调整命令功能。如果没有选项,那么命令只能执行最基本的功能;而一旦有选项,则可以显示更加丰富的数据。
Linux 的选项又分为短格式选项(-l)和长格式选项(--all)。短格式选项是英文的简写,用一个减号调用,例如:
[root@localhost ~]# ls -l
而长格式选项是英文完整单词,一般用两个减号调用,例如:
[root@localhost ~]# ls --all
一般情况下,短格式选项是长格式选项的缩写,也就是一个短格式选项会有对应的长格式选项。当然也有例外,比如 ls 命令的短格式选项 -l 就没有对应的长格式选项。所以具体的命令选项可以通过后面我们要学习的帮助命令来进行査询。
2) 参数的作用
参数是命令的操作对象,一般文件、目录、用户和进程等可以作为参数被命令操作。例如:
[root@localhost ~]# ls -l anaconda-ks.cfg
-rw-------.1 root root 1207 1 月 14 18:18 anaconda-ks.cfg
但是为什么一开始 ls 命令可以省略参数?那是因为有默认参数。命令一般都需要加入参数,用于指定命令操作的对象是谁。如果可以省略参数,则一般都有默认参数。例如:
[root@localhost ~]# ls
anaconda-ks.cfg install.log install.log.syslog
这个 ls 命令后面没有指定参数,默认参数是当前所在位置,所以会显示当前目录下的文件名。
'''
|
[
"1163270704@qq.com"
] |
1163270704@qq.com
|
2012b74d2ce14fa5c56da7a2de113423caeae59d
|
0aaf6ce59d305428611958a5bf6a5831407bca65
|
/advisor_server/suggestion/early_stop_algorithm/early_stop_descending.py
|
6f824fe160698a53bf830074eb354aa93ad923bd
|
[
"Apache-2.0"
] |
permissive
|
mlaradji/advisor
|
d770043a5307af1037cad6be1c449d541acf87b0
|
8ec0f8b64809daa80a20d717b4e45ad9fbcadbb0
|
refs/heads/master
| 2023-05-26T05:59:50.169748
| 2018-10-18T10:34:42
| 2018-10-18T10:34:42
| 154,219,666
| 0
| 0
|
Apache-2.0
| 2023-04-29T17:00:36
| 2018-10-22T21:27:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 989
|
py
|
import json
from suggestion.models import Study
from suggestion.models import TrialMetric
from suggestion.early_stop_algorithm.abstract_early_stop import AbstractEarlyStopAlgorithm
class EarlyStopDescendingAlgorithm(AbstractEarlyStopAlgorithm):
  """Early-stop trials whose objective moved the wrong way between the two
  most recent metrics (down for MAXIMIZE studies, up for MINIMIZE ones)."""

  def get_early_stop_trials(self, trials):
    """Return the subset of `trials` that should be stopped early.

    For each trial the two newest TrialMetric rows (ordered by descending
    training_step) are compared against the study goal; trials with fewer
    than two metrics are never stopped.
    """
    result = []
    for trial in trials:
      study = Study.objects.get(name=trial.study_name)
      study_configuration_json = json.loads(study.study_configuration)
      study_goal = study_configuration_json["goal"]
      # Newest metric first.  list(...) materializes the queryset once
      # (the original used a redundant copy-comprehension).
      metrics = list(
          TrialMetric.objects.filter(trial_id=trial.id).order_by("-training_step"))
      if len(metrics) >= 2:
        if study_goal == "MAXIMIZE":
          # Latest objective dropped below the previous one.
          if metrics[0].objective_value < metrics[1].objective_value:
            result.append(trial)
        elif study_goal == "MINIMIZE":
          # Latest objective rose above the previous one.
          if metrics[0].objective_value > metrics[1].objective_value:
            result.append(trial)
    return result
|
[
"tobeg3oogle@gmail.com"
] |
tobeg3oogle@gmail.com
|
2ff11310059b2cc48f811548c17c1873d63feae0
|
abad4b3101e46e0d8002f9b5796a3c32e958fd10
|
/Demo/demo/settings.py
|
a84aed7b9b3bc62bb44c45d1c30c24332708895d
|
[] |
no_license
|
Twishar/aiohttp
|
9ffec2f7d1431943780ac56c46fa140a589961da
|
e9bc0a8447f9792767ae1b93e15db22875ed114d
|
refs/heads/master
| 2020-03-22T03:35:47.398634
| 2019-03-19T15:33:17
| 2019-03-19T15:33:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import yaml
from pathlib import Path
__all__ = ('load_config',)
def load_config(config_file=None):
    """Load the default YAML config next to this module, with optional overrides.

    Args:
        config_file: optional open file-like object (or YAML string) whose
            top-level mapping overrides keys of the default config.
    Returns:
        dict with the merged configuration.

    Fixes: yaml.safe_load returns None for an empty document, which used to
    crash `config.update`; and `update(**cf_dict)` broke for keys that are
    not valid identifiers -- plain `update(mapping)` handles any str keys.
    """
    default_file = Path(__file__).parent / 'config.yaml'
    with open(default_file, 'r') as f:
        config = yaml.safe_load(f) or {}

    if config_file:
        overrides = yaml.safe_load(config_file) or {}
        config.update(overrides)

    return config
|
[
"stognienkovv@gmail.com"
] |
stognienkovv@gmail.com
|
929356f001a36d06a80acbe8aba1e2c37beeae02
|
fd62d8096dc95923341cfac29f0209bfbea887b4
|
/models_evaluation/xgboost/grid_search/jobs_test/5.0_0.07_0.0_10.0_10.0.job.py
|
6972e9e01ee719fd52a79035172c6ae1a7271bd2
|
[] |
no_license
|
Eulerianial/premise-selection-deepmath-style
|
06c8f2f540bc7e3840c6db0a66c5b30b5f4257f9
|
8684a59b5d8beab1d02a3a7c568a16c790ea4b45
|
refs/heads/master
| 2021-07-17T17:04:13.472687
| 2017-10-25T13:54:44
| 2017-10-25T13:54:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
import xgboost as xgb
import argparse
import sys
import os
from saving_loading import *
#####################################
# Hyper-parameter combination evaluated by this generated job; the values are
# baked in by the grid-search generator and are also encoded in the file name.
p = {
    "max_depth":int(5.0),
    "eta":0.07,
    "gamma":0.0,
    "num_boost_round":int(10.0),
    "early_stopping_rounds":int(10.0)
}
#####################################
if __name__ == "__main__":
    # CLI: feature matrix (pickled CSR), labels (CSV) and an output directory.
    parser = argparse.ArgumentParser(description='Run CV for xgboost with particular combination of parameters')
    parser.add_argument("X",
            help="path to CSR matrix with features of pairs (theorem, premise)")
    parser.add_argument("y",
            help="path to CSV file with labels reflecting relevances of pairs (theorem, premise)")
    parser.add_argument("output_directory",
            help="path to directory where performance of tested model should be saved")
    cli = parser.parse_args()

    labels = read_csv(os.path.abspath(cli.y), type_of_records="int")
    feature_matrix = load_obj(os.path.abspath(cli.X))
    out_dir = os.path.abspath(cli.output_directory)

    train_data = xgb.DMatrix(feature_matrix, label=labels)
    # Booster parameters are the tree-related entries of `p` plus the objective;
    # the round counts are passed to xgb.cv separately.
    booster_params = {key: p[key] for key in ("max_depth", "eta", "gamma")}
    booster_params["objective"] = "binary:logistic"

    cv_stats = xgb.cv(
        params=booster_params,
        dtrain=train_data,
        num_boost_round=p["num_boost_round"],
        early_stopping_rounds=p["early_stopping_rounds"],
        nfold=4,
        metrics={"error", "auc", "logloss"},
    )

    output_name = os.path.join(out_dir, "_".join(map(str, list(p.values()))) + ".pkl")
    save_obj({"params": p, "stats": cv_stats}, output_name)
|
[
"bartoszpiotrowski@post.pl"
] |
bartoszpiotrowski@post.pl
|
b877783770fb5ebffef4b657a4b127ada97799b4
|
64e24096ab40259cea27d431dce0814bc58597e2
|
/src/pymor/discretizers/fenics/cg.py
|
0f0cdaa80a93a228707624d1669589420e33ced8
|
[
"BSD-2-Clause"
] |
permissive
|
lbalicki/pymor
|
ea657d25d141895a40345533460543b27b79c6f0
|
e99f260097bd1db0eeb26102cdef8c672b3c9868
|
refs/heads/main
| 2023-03-18T18:35:38.993176
| 2022-08-26T06:30:33
| 2022-08-26T06:30:33
| 228,632,806
| 0
| 1
|
NOASSERTION
| 2019-12-17T14:20:50
| 2019-12-17T14:20:49
| null |
UTF-8
|
Python
| false
| false
| 7,419
|
py
|
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
config.require('FENICS')
import dolfin as df
from pymor.algorithms.preassemble import preassemble as preassemble_
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.analyticalproblems.functions import LincombFunction
from pymor.bindings.fenics import FenicsVectorSpace, FenicsMatrixBasedOperator, FenicsVisualizer
from pymor.discretizers.fenics.domaindiscretizer import discretize_domain
from pymor.models.basic import StationaryModel
from pymor.operators.constructions import LincombOperator, NumpyConversionOperator
from pymor.operators.block import BlockColumnOperator
def discretize_stationary_cg(analytical_problem, diameter=None, degree=1, preassemble=True):
    """Discretizes a |StationaryProblem| with finite elements using FEniCS.

    Parameters
    ----------
    analytical_problem
        The |StationaryProblem| to discretize.
    diameter
        If not `None`, `diameter` is passed as an argument to the `domain_discretizer`.
    degree
        polynomial degree of the finite element.
    preassemble
        If `True`, preassemble all operators in the resulting |Model|.

    Returns
    -------
    m
        The |Model| that has been generated.
    data
        Dictionary with the following entries:

            :mesh: The generated dolfin mesh object.
            :boundary_mask: Codim-1 `MeshFunctionSizet` indicating which boundary type a
                bundary facet belongs to.
            :boundary_ids: Dict mapping boundary types to ids used in `boundary_mask`.
            :unassembled_m: In case `preassemble` is `True`, the generated |Model|
                before preassembling operators.
    """
    assert isinstance(analytical_problem, StationaryProblem)

    p = analytical_problem

    # Unsupported problem features: fail loudly rather than discretize wrongly.
    if p.diffusion is not None and not p.diffusion.shape_range == ():
        raise NotImplementedError
    if p.nonlinear_advection is not None:
        raise NotImplementedError
    if p.nonlinear_advection_derivative is not None:
        raise NotImplementedError
    if p.nonlinear_reaction is not None:
        raise NotImplementedError
    if p.nonlinear_reaction_derivative is not None:
        raise NotImplementedError
    if not p.domain.boundary_types <= {'dirichlet', 'neumann'}:
        raise NotImplementedError
    if p.dirichlet_data is not None and p.dirichlet_data.parametric:
        raise NotImplementedError

    mesh, (boundary_mask, boundary_ids) = discretize_domain(p.domain, diameter=diameter)

    # Lagrange elements of the requested degree; the Dirichlet value is a
    # fixed constant (non-parametric data, checked above).
    V = df.FunctionSpace(mesh, 'Lagrange', degree)
    bc = df.DirichletBC(V, 0. if p.dirichlet_data is None else p.dirichlet_data.to_fenics(mesh)[0].item(),
                        boundary_mask, boundary_ids['dirichlet'])

    u = df.TrialFunction(V)
    v = df.TestFunction(V)
    dx, ds = df.dx, df.ds

    # Left-hand side: start with a zero bilinear form that only carries the
    # Dirichlet boundary rows ('boundary_part'); diffusion/advection/reaction
    # contributions are appended by _assemble_operator below.
    Li = [FenicsMatrixBasedOperator(df.Constant(0.)*u*v*dx, {}, bc, bc_zero=False, name='boundary_part')]
    coefficients = [1.]

    _assemble_operator(p.diffusion, lambda c: df.inner(c.item() * df.grad(u), df.grad(v)) * dx,
                       mesh, bc, True, 'diffusion',
                       Li, coefficients)

    _assemble_operator(
        p.advection, lambda c: u * sum(ci * gi for ci, gi in zip(c, df.grad(v))) * dx,
        mesh, bc, True, 'advection',
        Li, coefficients
    )

    _assemble_operator(
        p.reaction, lambda c: c * u * v * dx,
        mesh, bc, True, 'reaction',
        Li, coefficients,
    )

    L = LincombOperator(operators=Li, coefficients=coefficients, name='ellipticOperator')

    # right-hand side
    Fi = []
    coefficients_F = []

    _assemble_operator(p.rhs, lambda c: c.item() * v * dx,
                       mesh, bc, False, 'rhs',
                       Fi, coefficients_F)

    if p.neumann_data is not None and p.domain.has_neumann:
        # Neumann data enters the rhs with a negative sign.
        _assemble_operator(
            p.neumann_data, lambda c: c.item() * v * ds,
            mesh, bc, False, 'neumann',
            Fi, coefficients_F, negative=True
        )

    F = LincombOperator(operators=Fi, coefficients=coefficients_F, name='rhsOperator')

    h1_0_semi_product = FenicsMatrixBasedOperator(df.inner(df.grad(u), df.grad(v))*dx, {}, bc, bc_zero=False,
                                                  name='h1_0_semi')
    l2_product = FenicsMatrixBasedOperator(u*v*dx, {}, name='l2')
    # NOTE(review): h1_semi_product is assembled identically to
    # h1_0_semi_product (same bc, same name 'h1_0_semi'); a bc-free 'h1_semi'
    # operator may have been intended -- confirm upstream before changing.
    h1_semi_product = FenicsMatrixBasedOperator(df.inner(df.grad(u), df.grad(v))*dx, {}, bc, bc_zero=False,
                                                name='h1_0_semi')
    h1_product = l2_product + h1_semi_product

    products = {
        'l2': l2_product,
        'h1_semi': h1_0_semi_product,
        'h1': h1_product,
        'h1_0_semi': h1_0_semi_product,
    }

    if p.outputs:
        # Only plain L2 output functionals (over the domain or the boundary)
        # are supported.
        if any(o[0] not in ('l2', 'l2_boundary') for o in p.outputs):
            raise NotImplementedError
        outputs = []
        for o in p.outputs:
            if o[0] == 'l2':
                outputs.append(
                    _assemble_operator(o[1], lambda c: c * v * dx, mesh,
                                       functional=True, name='l2_output')
                )
            else:
                outputs.append(
                    _assemble_operator(o[1], lambda c: c * v * ds, mesh,
                                       functional=True, name='l2_boundary_output')
                )
        if len(outputs) > 1:
            # Stack the scalar functionals and convert the result to NumPy.
            output_functional = BlockColumnOperator(outputs)
            output_functional = NumpyConversionOperator(output_functional.range) @ output_functional
        else:
            output_functional = outputs[0]
    else:
        output_functional = None

    m = StationaryModel(L, F, output_functional=output_functional, products=products,
                        visualizer=FenicsVisualizer(FenicsVectorSpace(V)),
                        name=f'{p.name}_CG')

    data = {
        'mesh': mesh,
        'boundary_mask': boundary_mask,
        'boundary_ids': boundary_ids,
        'bc': bc,
    }

    if preassemble:
        data['unassembled_m'] = m
        m = preassemble_(m)

    return m, data
def _assemble_operator(function, factory,
                       mesh, bc=None, bc_zero=None, name=None,
                       ops=None, coeffs=None,
                       negative=False, functional=False):
    """Assemble FEniCS operator(s) for `function` via the form `factory`.

    For a |LincombFunction| one operator per summand is assembled; otherwise
    a single operator.  If `ops`/`coeffs` lists are given, the results are
    appended to them in place and nothing is returned; otherwise the
    assembled (Lincomb)Operator is returned.  `negative` flips the sign of
    the coefficients / the returned operator.  `None` input is a no-op.
    """
    def assemble_op(f, name):
        # Translate the pyMOR function to a FEniCS coefficient, then wrap the
        # resulting UFL form as a matrix-based operator.
        coeff, params = f.to_fenics(mesh)
        return FenicsMatrixBasedOperator(factory(coeff), params,
                                         bc=bc, bc_zero=bc_zero, functional=functional, name=name)

    if isinstance(function, LincombFunction):
        operators = [assemble_op(f, f'{name}_{i}') for i, f in enumerate(function.functions)]
        cfs = [-c if negative else c for c in function.coefficients]
        if ops is not None:
            ops.extend(operators)
            coeffs.extend(cfs)
        else:
            return LincombOperator(operators, cfs, name=name)
    elif function is not None:
        operator = assemble_op(function, name)
        if ops is not None:
            ops.append(operator)
            # Use a float in both branches (the original mixed int -1 with
            # float 1.).
            coeffs.append(-1. if negative else 1.)
        else:
            return -operator if negative else operator
|
[
"stephanrave@uni-muenster.de"
] |
stephanrave@uni-muenster.de
|
a25c812d2cceaf4bdb7d6e95c7aeeb05abfe9817
|
53865bdc3b5e5bb26ecd40d30b66ad71de1081cc
|
/src/bed_gff_manipulation/filter_bed_to_fasta.py
|
33d2602ba502ca600e516a540f906c2a190a9c7e
|
[
"MIT"
] |
permissive
|
olgatsiouri1996/biomisc
|
a5477279ab53a5307ce026868fa77639b45a44af
|
e3709f566c5c93aec884558f1f2b620a1cf9792d
|
refs/heads/main
| 2023-08-19T08:59:32.899646
| 2023-08-16T13:10:05
| 2023-08-16T13:10:05
| 300,590,735
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,213
|
py
|
# python3
import sys
import argparse
from pyfaidx import Fasta
import pandas as pd
import warnings
# input parameters (all file paths; -fea/-pro tune the optional BED filtering)
ap = argparse.ArgumentParser()
ap.add_argument("-bed", "--bed", required=True, help="input bed file(made with bedops, every feature in the \'.gff\' or \'.gff3\' file should have an \'ID\' tag in the \'attributes\' column)")
ap.add_argument("-in", "--input", required=True, help="input fasta file")
ap.add_argument("-out", "--output", required=True, help="output fasta file")
ap.add_argument("-fea", "--feature", required=False, default='gene', type=str, help="specify the pattern to select the lines that have it. Default is \'gene\'")
ap.add_argument("-pro", "--program", required=False, default=1, type=int, help="program to choose: 1) filter the bed file before retrieving sequences, 2) do not filter. Default is 1")
args = vars(ap.parse_args())
# main
# create function to split a sequence into fixed-width chunks; the width is
# now a parameter (default 60, the conventional FASTA line length)
def split_every_60(s, width=60):
    """Return str(s) split into consecutive chunks of `width` characters."""
    text = str(s)
    return [text[i:i + width] for i in range(0, len(text), width)]
# ignore warnings (pandas/pyfaidx emit noisy ones for odd records)
warnings.filterwarnings('ignore')
# import bed with no headers specified
df = pd.read_csv(args['bed'], sep="\t", header=None)
# choose program
if args['program'] == 1:
    # keep only rows whose 8th (attributes) column mentions the feature
    bool2 = df.iloc[:, 7].str.contains(args['feature'])
    df = df[bool2]
# convert each column to list (bedops BED layout: chrom, start, end, id,
# score, strand, ...)
chrom = df.iloc[:, 0].values.tolist()
start = df.iloc[:, 1].values.tolist()
end = df.iloc[:, 2].values.tolist()
ids = df.iloc[:, 3].values.tolist()
strand = df.iloc[:, 5].values.tolist()
# import fasta file (indexed random access via pyfaidx)
features = Fasta(args['input'])
# Write records to the output file directly.  The original reassigned and
# then closed sys.stdout, which permanently broke any later print in the
# process; a normal file handle in a `with` block produces identical output.
with open(args['output'], 'a') as out:
    for (a, b, c, d, e) in zip(ids, chrom, start, end, strand):
        if str(e) == "+":
            header = ''.join([">", str(a), " ", str(b), ":", str(int(c) + 1), "-", str(d)]).replace('\r', '')
            seq = features[str(b)][int(c):int(d)].seq
        else:
            header = ''.join([">", str(a), " ", str(b), ":", str(int(c) + 1), "-", str(d), " ", "reverse complement"]).replace('\r', '')
            seq = features[str(b)][int(c):int(d)].reverse.complement.seq
        out.write(header + "\n")
        out.write('\n'.join(split_every_60(seq)) + "\n")
|
[
"olgatsiouri@outlook.com"
] |
olgatsiouri@outlook.com
|
f0cd7cf24537cb5be88b18a99b4c9f72c7b130e8
|
0a1f8957a798006deaa53d10d09f733fab1e6b05
|
/bin/Python27/Lib/site-packages/sympy/core/core.py
|
4c2bf72e9018dc683d8f301c69a313158ff99217
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
metamorph-inc/meta-core
|
a89504ccb1ed2f97cc6e792ba52e3a6df349efef
|
bc7a05e04c7901f477fe553c59e478a837116d92
|
refs/heads/master
| 2023-03-07T02:52:57.262506
| 2023-03-01T18:49:49
| 2023-03-01T18:49:49
| 40,361,476
| 25
| 15
|
NOASSERTION
| 2023-01-13T16:54:30
| 2015-08-07T13:21:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,764
|
py
|
""" The core's core. """
from sympy.core.compatibility import cmp
# used for canonical ordering of symbolic sequences
# via __cmp__ method (consulted by BasicMeta.__cmp__ below; classes not
# listed here sort after all listed ones, alphabetically):
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
    # singleton numbers
    'Zero', 'One','Half','Infinity','NaN','NegativeOne','NegativeInfinity',
    # numbers
    'Integer','Rational','Float',
    # singleton symbols
    'Exp1','Pi','ImaginaryUnit',
    # symbols
    'Symbol','Wild','Temporary',
    # arithmetic operations
    'Pow', 'Mul', 'Add',
    # function values
    'Derivative','Integral',
    # defined singleton functions
    'Abs','Sign','Sqrt',
    'Floor', 'Ceiling',
    'Re', 'Im', 'Arg',
    'Conjugate',
    'Exp','Log',
    'Sin','Cos','Tan','Cot','ASin','ACos','ATan','ACot',
    'Sinh','Cosh','Tanh','Coth','ASinh','ACosh','ATanh','ACoth',
    'RisingFactorial','FallingFactorial',
    'factorial','binomial',
    'Gamma','LowerGamma','UpperGamma','PolyGamma',
    'Erf',
    # special polynomials
    'Chebyshev','Chebyshev2',
    # undefined functions
    'Function','WildFunction',
    # anonymous functions
    'Lambda',
    # Landau O symbol
    'Order',
    # relational operations
    'Equality', 'Unequality', 'StrictGreaterThan', 'StrictLessThan',
    'GreaterThan', 'LessThan',
]
class BasicType(type):
    """Root metaclass type; BasicMeta below derives from it, and isinstance
    checks against BasicType identify sympy class objects."""
    pass
class Registry(object):
    """
    Base class for registry objects.

    Registries map a name to an object using attribute notation. Registry
    classes behave singletonically: all their instances share the same state,
    which is stored in the class object.

    All subclasses should set `__slots__ = []`.
    """
    __slots__ = []

    def __setattr__(self, name, obj):
        # Store on the class, not the instance, so every instance shares state.
        setattr(self.__class__, name, obj)

    def __delattr__(self, name):
        delattr(self.__class__, name)
# A set containing all sympy class objects, kept in sync with C:
# ClassRegistry.__setattr__/__delattr__ add and remove entries.
all_classes = set()
class ClassRegistry(Registry):
    """
    Namespace for SymPy classes

    This is needed to avoid problems with cyclic imports.
    To get a SymPy class, use `C.<class_name>` e.g. `C.Rational`, `C.Add`.

    For performance reasons, this is coupled with a set `all_classes` holding
    the classes, which should not be modified directly.
    """
    __slots__ = []

    def __setattr__(self, name, cls):
        Registry.__setattr__(self, name, cls)
        all_classes.add(cls)

    def __delattr__(self, name):
        cls = getattr(self, name)
        Registry.__delattr__(self, name)
        # The same class could have different names, so make sure
        # it's really gone from C before removing it from all_classes.
        # (itervalues() is Python 2 only -- this copy is vendored for 2.7.)
        if cls not in self.__class__.__dict__.itervalues():
            all_classes.remove(cls)
# Singleton class namespace; BasicMeta registers every class here.
C = ClassRegistry()
class BasicMeta(BasicType):
    """Metaclass for Basic: registers every class in `C` and implements the
    canonical class ordering (Python 2 __cmp__ protocol) used for sorting
    symbolic sequences."""

    def __init__(cls, *args, **kws):
        # Register the new class in the C namespace (which also adds it to
        # all_classes via ClassRegistry.__setattr__).
        setattr(C, cls.__name__, cls)

    def __cmp__(cls, other):
        # If the other object is not a Basic subclass, then we are not equal to
        # it.
        if not isinstance(other, BasicType):
            return -1
        n1 = cls.__name__
        n2 = other.__name__
        c = cmp(n1, n2)
        if not c:
            return 0

        UNKNOWN = len(ordering_of_classes) + 1
        try:
            i1 = ordering_of_classes.index(n1)
        except ValueError:
            i1 = UNKNOWN
        try:
            i2 = ordering_of_classes.index(n2)
        except ValueError:
            i2 = UNKNOWN
        if i1 == UNKNOWN and i2 == UNKNOWN:
            # Neither class is listed: fall back to alphabetical order.
            return c
        return cmp(i1, i2)

    def __lt__(cls, other):
        if cls.__cmp__(other) == -1:
            return True
        return False

    def __gt__(cls, other):
        if cls.__cmp__(other) == 1:
            return True
        return False
# BasicMeta has no Basic base, so register it in C by hand.
C.BasicMeta = BasicMeta
|
[
"kevin.m.smyth@gmail.com"
] |
kevin.m.smyth@gmail.com
|
dc82b171cc6ab60e7af9a0c3dfb107309555c95e
|
8114909d3ed6ee1e6d1fbe14a37723015ab53af6
|
/source_test.py
|
91ee4a6072716b0382d94851d413d9bb445b4364
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
notenoughneon/activitystreams-unofficial
|
b0c66d48eb3b43d68b76df069ba237dce9d77489
|
1f45bde45d3d18ef39d69ebd698e248233b94ce9
|
refs/heads/master
| 2021-01-18T03:01:03.101619
| 2014-08-15T15:00:00
| 2014-08-15T23:48:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,093
|
py
|
# coding=utf-8
"""Unit tests for source.py.
"""
__author__ = ['Ryan Barrett <activitystreams@ryanb.org>']
import copy
import json
import mox
from source import Source
from oauth_dropins.webutil import testutil
from oauth_dropins.webutil import util
# Canned "like" tags: the first author carries both a tag URI and a numeric
# id, the second only a tag URI (exercises both lookup paths in get_like).
LIKES = [{
  'verb': 'like',
  'author': {'id': 'tag:fake.com:person', 'numeric_id': '5'},
  'object': {'url': 'http://foo/like/5'},
  }, {
  'verb': 'like',
  'author': {'id': 'tag:fake.com:6'},
  'object': {'url': 'http://bar/like/6'},
  },
]
# Activity whose object carries the LIKES above as tags.
ACTIVITY = {
  'id': '1',
  'object': {
    'id': '1',
    'tags': LIKES,
  }
}
# One RSVP of each verb; the middle one deliberately has no id.
RSVPS = [{
  'id': 'tag:fake.com:246_rsvp_11500',
  'objectType': 'activity',
  'verb': 'rsvp-yes',
  'actor': {'displayName': 'Aaron P', 'id': 'tag:fake.com,2013:11500'},
  }, {
  'objectType': 'activity',
  'verb': 'rsvp-no',
  'actor': {'displayName': 'Ryan B'},
  }, {
  'id': 'tag:fake.com:246_rsvp_987',
  'objectType': 'activity',
  'verb': 'rsvp-maybe',
  'actor': {'displayName': 'Foo', 'id': 'tag:fake.com,2013:987'},
  }]
EVENT = {
  'id': 'tag:fake.com:246',
  'objectType': 'event',
  'displayName': 'Homebrew Website Club',
  }
# EVENT plus the attendee lists derived from RSVPS.
EVENT_WITH_RSVPS = copy.deepcopy(EVENT)
EVENT_WITH_RSVPS.update({
    'attending': [RSVPS[0]['actor']],
    'notAttending': [RSVPS[1]['actor']],
    'maybeAttending': [RSVPS[2]['actor']],
    })
class FakeSource(Source):
  """Minimal concrete Source used by the tests below."""
  DOMAIN = 'fake.com'

  def __init__(self, **kwargs):
    # Deliberately skips Source.__init__ so tests can instantiate without
    # any configuration.
    pass
class SourceTest(testutil.HandlerTest):
  """Tests for Source, run against FakeSource.

  get_activities() is stubbed out with mox, so each test scripts exactly
  what the source should "fetch" before exercising the method under test.
  """

  def setUp(self):
    super(SourceTest, self).setUp()
    self.source = FakeSource()
    # Each test records its own get_activities() expectations via mox.
    self.mox.StubOutWithMock(self.source, 'get_activities')

  def test_original_post_discovery(self):
    activity = {'object': {
        'objectType': 'article',
        'displayName': 'article abc',
        'url': 'http://example.com/article-abc',
        'tags': [],
        }}
    self.assert_equals(activity, Source.original_post_discovery(
        copy.deepcopy(activity)))

    # missing objectType
    activity['object']['attachments'] = [{'url': 'http://x.com/y'}]
    Source.original_post_discovery(activity)
    self.assert_equals([], activity['object']['tags'])

    activity['object']['content'] = 'x (not.at end) y (at.the end)'
    Source.original_post_discovery(activity)
    self.assert_equals(['http://at.the/end'],
                       activity['object']['upstreamDuplicates'])
    self.assert_equals([], activity['object']['tags'])

    activity['object'].update({
        'content': 'x http://baz/3 y',
        'attachments': [{'objectType': 'article', 'url': 'http://foo/1'}],
        'tags': [{'objectType': 'article', 'url': 'http://bar/2'}],
        })
    Source.original_post_discovery(activity)
    self.assert_equals([
        {'objectType': 'article', 'url': 'http://foo/1'},
        {'objectType': 'article', 'url': 'http://bar/2'},
        {'objectType': 'article', 'url': 'http://baz/3'},
        ], activity['object']['tags'])

    # leading parens used to cause us trouble
    activity = {'object': {'content' : 'Foo (http://snarfed.org/xyz)'}}
    Source.original_post_discovery(activity)
    self.assert_equals(
        [{'objectType': 'article', 'url': 'http://snarfed.org/xyz'}],
        activity['object']['tags'])

    # don't duplicate PSCs and PSLs with http and https
    for field in 'tags', 'attachments':
      for scheme in 'http', 'https':
        url = scheme + '://foo.com/1'
        activity = {'object': {
            'content': 'x (foo.com/1)',
            field: [{'objectType': 'article', 'url': url}],
            }}
        Source.original_post_discovery(activity)
        self.assert_equals([{'objectType': 'article', 'url': url}],
                           activity['object']['tags'])

    # exclude ellipsized URLs
    for ellipsis in '...', u'…':
      url = 'foo.com/1' + ellipsis
      activity = {'object': {
          'content': 'x (%s)' % url,
          'attachments': [{'objectType': 'article', 'url': 'http://' + url}],
          }}
      Source.original_post_discovery(activity)
      self.assert_equals([], activity['object']['tags'])

  def test_get_like(self):
    self.source.get_activities(user_id='author', activity_id='activity',
                               fetch_likes=True).AndReturn([ACTIVITY])
    self.mox.ReplayAll()
    self.assert_equals(LIKES[1], self.source.get_like('author', 'activity', '6'))

  def test_get_like_numeric_id(self):
    # '5' matches LIKES[0] via its numeric_id, not its tag URI.
    self.source.get_activities(user_id='author', activity_id='activity',
                               fetch_likes=True).AndReturn([ACTIVITY])
    self.mox.ReplayAll()
    self.assert_equals(LIKES[0], self.source.get_like('author', 'activity', '5'))

  def test_get_like_not_found(self):
    activity = copy.deepcopy(ACTIVITY)
    del activity['object']['tags']
    self.source.get_activities(user_id='author', activity_id='activity',
                               fetch_likes=True).AndReturn([activity])
    self.mox.ReplayAll()
    self.assert_equals(None, self.source.get_like('author', 'activity', '6'))

  def test_get_like_no_activity(self):
    self.source.get_activities(user_id='author', activity_id='activity',
                               fetch_likes=True).AndReturn([])
    self.mox.ReplayAll()
    self.assert_equals(None, self.source.get_like('author', 'activity', '6'))

  def test_get_share(self):
    activity = copy.deepcopy(ACTIVITY)
    share = activity['object']['tags'][1]
    share['verb'] = 'share'
    self.source.get_activities(user_id='author', activity_id='activity',
                               fetch_shares=True).AndReturn([activity])
    self.mox.ReplayAll()
    self.assert_equals(share, self.source.get_share('author', 'activity', '6'))

  def test_get_share_not_found(self):
    self.source.get_activities(user_id='author', activity_id='activity',
                               fetch_shares=True).AndReturn([ACTIVITY])
    self.mox.ReplayAll()
    self.assert_equals(None, self.source.get_share('author', 'activity', '6'))

  def test_add_rsvps_to_event(self):
    event = copy.deepcopy(EVENT)
    Source.add_rsvps_to_event(event, [])
    self.assert_equals(EVENT, event)

    Source.add_rsvps_to_event(event, RSVPS)
    self.assert_equals(EVENT_WITH_RSVPS, event)

  def test_get_rsvps_from_event(self):
    self.assert_equals([], Source.get_rsvps_from_event(EVENT))
    self.assert_equals(RSVPS, Source.get_rsvps_from_event(EVENT_WITH_RSVPS))

  def test_get_rsvps_from_event_bad_id(self):
    event = copy.deepcopy(EVENT)
    for id in None, 'not_a_tag_uri':
      event['id'] = id
      self.assert_equals([], Source.get_rsvps_from_event(event))

  def test_base_object_multiple_objects(self):
    like = copy.deepcopy(LIKES[0])
    like['object'] = [like['object'], {'url': 'http://fake.com/second'}]
    self.assert_equals(('second', 'http://fake.com/second'),
                       self.source.base_object(like))

  def test_content_for_create(self):
    # cfc = "content for create": merge extra into base, then ask the source.
    def cfc(base, extra):
      obj = base.copy()
      obj.update(extra)
      return self.source._content_for_create(obj)

    self.assertEqual(None, cfc({}, {}))

    for base in ({'objectType': 'article'},
                 {'inReplyTo': {'url': 'http://not/fake'}},
                 {'objectType': 'comment', 'object': {'url': 'http://not/fake'}}):
      self.assertEqual(None, cfc(base, {}))
      self.assertEqual('c', cfc(base, {'content': ' c '}))
      self.assertEqual('n', cfc(base, {'content': 'c', 'displayName': 'n'}))
      self.assertEqual('s', cfc(base, {'content': 'c', 'displayName': 'n',
                                       'summary': 's'}))

    for base in ({'objectType': 'note'},
                 {'inReplyTo': {'url': 'http://fake.com/post'}},
                 {'objectType': 'comment',
                  'object': {'url': 'http://fake.com/post'}}):
      self.assertEqual(None, cfc(base, {}))
      self.assertEqual('n', cfc(base, {'displayName': 'n'}))
      self.assertEqual('c', cfc(base, {'displayName': 'n', 'content': 'c'}))
      self.assertEqual('s', cfc(base, {'displayName': 'n', 'content': 'c',
                                       'summary': ' s '}))
|
[
"git@ryanb.org"
] |
git@ryanb.org
|
b27eade25115e891e7aff1fada285bf11bcc7f81
|
dd9e19abfff532e7f4dea5f5b57ac6a4da9f1e6f
|
/fabric/thread_handling.py
|
25aa3a2326b3f02aad1cc42e0c3341329190fd37
|
[
"BSD-2-Clause"
] |
permissive
|
jonatkinson/fabric
|
27c6146243a2c846162e0a6e14f282b900cb2734
|
64eb6c56e1aa4c0b654bb8d17f0a09386616342b
|
refs/heads/master
| 2020-12-24T20:01:02.759635
| 2011-03-04T02:26:31
| 2011-03-04T02:26:31
| 1,441,428
| 0
| 0
|
BSD-2-Clause
| 2020-06-01T13:22:18
| 2011-03-04T23:00:45
|
Python
|
UTF-8
|
Python
| false
| false
| 587
|
py
|
import threading
import sys
class ThreadHandler(object):
    """Run *callable* in a daemonized background thread, capturing exceptions.

    Any exception raised by the callable is recorded on ``self.exception`` as
    a ``sys.exc_info()`` triple instead of killing the interpreter; callers
    can join ``self.thread`` and then inspect it.
    """

    def __init__(self, name, callable, *args, **kwargs):
        # Nothing has failed yet; checked by the parent after join().
        self.exception = None

        def guarded(*inner_args, **inner_kwargs):
            # Record rather than propagate, so the spawning thread can see it.
            try:
                callable(*inner_args, **inner_kwargs)
            except BaseException:
                self.exception = sys.exc_info()

        worker = threading.Thread(target=guarded, name=name,
                                  args=args, kwargs=kwargs)
        worker.setDaemon(True)
        worker.start()

        # Expose the thread object to the instantiator.
        self.thread = worker
|
[
"jeff@bitprophet.org"
] |
jeff@bitprophet.org
|
b22c5bba251df2059e2e293d1f03d796f0be5fc0
|
95b0b12c8e3b9982aff752b4f5e69e7812e56728
|
/12-Spider/09_UA.py
|
b53bce5333ba1cfe540fc9814bdfdbbfd79ab36e
|
[] |
no_license
|
PeterTao666/learnpython2
|
fb6792de7d28d306eaeda9098914fa5bb2151592
|
56a506590bf625c5c1ab23a530f30b23b89c8864
|
refs/heads/master
| 2020-04-17T11:41:12.587398
| 2019-02-12T14:06:38
| 2019-02-12T14:06:38
| 166,550,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
# 访问一个网址
# 更改自己的UserAgent进行伪装
from urllib import request, error
if __name__ == '__main__':
    url = 'http://www.baidu.com'
    try:
        # Spoof the User-Agent so the request looks like Mobile Safari
        # instead of urllib's default Python client string.
        # Option 1: pass a headers dict straight to Request:
        #headers = {}
        #headers['User-Agent'] = 'Mozilla/5.0 (ipad;CPU OS 5_0 like Mac OS X) AppleWibKit/534.46(KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'
        #req = request.Request(url, headers=headers)
        # Option 2: call add_header() after constructing the Request:
        req = request.Request(url)
        req.add_header("User-Agent", "Mozilla/5.0 (ipad;CPU OS 5_0 like Mac OS X) AppleWibKit/534.46(KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3'")
        # Fetch and decode the page normally.
        rsp = request.urlopen(req)
        html = rsp.read().decode()
        print(html)
    # Most specific handler first: HTTPError is a subclass of URLError.
    except error.HTTPError as e:
        print(e)
    except error.URLError as e:
        print(e)
    except Exception as e:
        print(e)
    print("Done...")
|
[
"taofeng1993@163.com"
] |
taofeng1993@163.com
|
e05696c1beeb3ed1f8442fe11b4519696639551d
|
634fb5fe10e8f944da44ab31896acc8471ec5f18
|
/hq_env/lib/python2.7/site-packages/openpyxl/tests/test_write.py
|
097e233c689ef564cb1d2d59ffbcc48be6eaf0f3
|
[] |
no_license
|
dimagi/commcarehq-venv
|
277d0b6fada24f2edd54f74850267201153412a7
|
2c52e3fb0f974cae5c5feaea1d5de851fe530c80
|
refs/heads/master
| 2021-01-18T14:05:47.931306
| 2015-07-20T10:10:41
| 2015-07-20T10:10:41
| 11,513,855
| 1
| 1
| null | 2015-07-20T10:10:41
| 2013-07-18T21:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 7,980
|
py
|
# file openpyxl/tests/test_write.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
# Python stdlib imports
from StringIO import StringIO
import os.path
# 3rd party imports
from nose.tools import eq_, with_setup, raises
# package imports
from openpyxl.tests.helper import TMPDIR, DATADIR, \
assert_equals_file_content, clean_tmpdir, make_tmpdir
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.excel import save_workbook, save_virtual_workbook, \
ExcelWriter
from openpyxl.writer.workbook import write_workbook, write_workbook_rels
from openpyxl.writer.worksheet import write_worksheet, write_worksheet_rels
from openpyxl.writer.strings import write_string_table
from openpyxl.writer.styles import StyleWriter
@with_setup(setup = make_tmpdir, teardown = clean_tmpdir)
def test_write_empty_workbook():
    """save_workbook() writes an .xlsx file to disk even for an empty workbook."""
    wb = Workbook()
    dest_filename = os.path.join(TMPDIR, 'empty_book.xlsx')
    save_workbook(wb, dest_filename)
    assert os.path.isfile(dest_filename)
def test_write_virtual_workbook():
    """A workbook saved to memory can be loaded back from a StringIO."""
    old_wb = Workbook()
    saved_wb = save_virtual_workbook(old_wb)
    new_wb = load_workbook(StringIO(saved_wb))
    assert new_wb
def test_write_workbook_rels():
    """Workbook relationships XML matches the expected fixture file."""
    wb = Workbook()
    content = write_workbook_rels(wb)
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'workbook.xml.rels'), content)
def test_write_workbook():
    """Workbook XML matches the expected fixture file."""
    wb = Workbook()
    content = write_workbook(wb)
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'workbook.xml'), content)
def test_write_string_table():
    """Shared-strings XML for a small string table matches the fixture."""
    table = {'hello': 1, 'world': 2, 'nice': 3}
    content = write_string_table(table)
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sharedStrings.xml'), content)
def test_write_worksheet():
    """Worksheet XML for a single shared-string cell matches the fixture."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F42').value = 'hello'
    content = write_worksheet(ws, {'hello': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1.xml'), content)
def test_write_hidden_worksheet():
    """A hidden sheet still serializes its cell contents identically."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.sheet_state = ws.SHEETSTATE_HIDDEN
    ws.cell('F42').value = 'hello'
    content = write_worksheet(ws, {'hello': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1.xml'), content)
def test_write_bool():
    """Boolean cell values serialize to the expected worksheet XML."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F42').value = False
    ws.cell('F43').value = True
    content = write_worksheet(ws, {}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_bool.xml'), content)
def test_write_formula():
    """A '=F1+F2' formula cell serializes to the expected worksheet XML."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F1').value = 10
    ws.cell('F2').value = 32
    ws.cell('F3').value = '=F1+F2'
    content = write_worksheet(ws, {}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_formula.xml'), content)
def test_write_style():
    """A percentage-styled cell serializes with the right style ids."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F1').value = '13%'
    style_id_by_hash = StyleWriter(wb).get_style_by_hash()
    content = write_worksheet(ws, {}, style_id_by_hash)
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_style.xml'), content)
def test_write_height():
    """A custom row height serializes to the expected worksheet XML."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F1').value = 10
    ws.row_dimensions[ws.cell('F1').row].height = 30
    content = write_worksheet(ws, {}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_height.xml'), content)
def test_write_hyperlink():
    """A hyperlinked cell serializes to the expected worksheet XML."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('A1').value = "test"
    ws.cell('A1').hyperlink = "http://test.com"
    content = write_worksheet(ws, {'test': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_hyperlink.xml'), content)
def test_write_hyperlink_rels():
    """Each hyperlink adds one worksheet relationship; rels XML matches."""
    wb = Workbook()
    ws = wb.create_sheet()
    eq_(0, len(ws.relationships))
    ws.cell('A1').value = "test"
    ws.cell('A1').hyperlink = "http://test.com/"
    eq_(1, len(ws.relationships))
    ws.cell('A2').value = "test"
    ws.cell('A2').hyperlink = "http://test2.com/"
    eq_(2, len(ws.relationships))
    content = write_worksheet_rels(ws, 1)
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_hyperlink.xml.rels'), content)
def test_hyperlink_value():
    """Setting a hyperlink defaults the cell value to the URL until overwritten."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('A1').hyperlink = "http://test.com"
    eq_("http://test.com", ws.cell('A1').value)
    ws.cell('A1').value = "test"
    eq_("test", ws.cell('A1').value)
def test_write_auto_filter():
    """An auto-filter range appears in both worksheet and workbook XML."""
    wb = Workbook()
    ws = wb.worksheets[0]
    ws.cell('F42').value = 'hello'
    ws.auto_filter = 'A1:F1'
    content = write_worksheet(ws, {'hello': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_auto_filter.xml'), content)
    content = write_workbook(wb)
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'workbook_auto_filter.xml'), content)
def test_freeze_panes_horiz():
    """Worksheet XML for a horizontal freeze-pane split (freeze_panes='A4')."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F42').value = 'hello'
    ws.freeze_panes = 'A4'
    content = write_worksheet(ws, {'hello': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_freeze_panes_horiz.xml'), content)
def test_freeze_panes_vert():
    """Worksheet XML for a vertical freeze-pane split (freeze_panes='D1')."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F42').value = 'hello'
    ws.freeze_panes = 'D1'
    content = write_worksheet(ws, {'hello': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_freeze_panes_vert.xml'), content)
    # Removed a stray dead `pass` statement that followed the assertion.
def test_freeze_panes_both():
    """Worksheet XML for a combined freeze-pane split (freeze_panes='D4')."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('F42').value = 'hello'
    ws.freeze_panes = 'D4'
    content = write_worksheet(ws, {'hello': 0}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'sheet1_freeze_panes_both.xml'), content)
def test_long_number():
    """A 13-digit integer serializes without scientific-notation mangling."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('A1').value = 9781231231230
    content = write_worksheet(ws, {}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'long_number.xml'), content)
def test_short_number():
    """A 10-digit integer serializes to the expected worksheet XML."""
    wb = Workbook()
    ws = wb.create_sheet()
    ws.cell('A1').value = 1234567890
    content = write_worksheet(ws, {}, {})
    assert_equals_file_content(os.path.join(DATADIR, 'writer', 'expected', \
            'short_number.xml'), content)
|
[
"dmyung@dimagi.com"
] |
dmyung@dimagi.com
|
4e0c8635049fc400d8256cfd2f5f3190bb8a40f3
|
814f8b85dd6435b3bb3fdebf2f193912aa145a62
|
/image_segmentation/slim_fcn/utils.py
|
d84502d6bbb3c3d6baedec6adf6a1cc5d5015d94
|
[
"Apache-2.0"
] |
permissive
|
jacke121/pycharm
|
480df86258ee918de25b76a4156e9e6b9d355df7
|
b9b2963cf0c5028f622f41413f52f1b5cbde28a1
|
refs/heads/master
| 2020-03-18T16:35:25.579992
| 2018-01-01T02:30:58
| 2018-01-01T02:30:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by weihang huang on 17-12-23
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import tensorflow as tf
import numpy as np
def colormap(n):
    """Build an n-entry PASCAL-VOC-style label colormap, shape (n, 3) uint8.

    Bit 3*j+c of the label index contributes weight 2**(7-j) to channel c,
    spreading the index bits across R/G/B so nearby labels get distinct hues.
    """
    table = np.zeros((n, 3), dtype=np.uint8)
    for index in range(n):
        channels = [0, 0, 0]
        for bit in range(8):
            weight = 1 << (7 - bit)
            for chan in range(3):
                if (index >> (3 * bit + chan)) & 1:
                    channels[chan] += weight
        table[index] = channels
    return table
class Colorize(object):
    """Map a 2-D array of label indices (0..255) to an RGB color image.

    Uses the 256-entry colormap(); returns a float array of shape (H, W, 3),
    matching the dtype of the original np.zeros-based implementation.
    """

    def __init__(self):
        # Precompute the full 256-entry lookup table once per instance.
        self.cmap = colormap(256)

    def __call__(self, gray_image):
        # Vectorized fancy-index lookup replaces the original per-pixel
        # Python double loop (O(H*W) interpreter overhead) with one NumPy
        # operation; values are identical.  Cast to float64 to preserve the
        # original output dtype (np.zeros default).
        # NOTE(review): assumes gray_image holds integer labels < 256, as
        # the original indexing did.
        return self.cmap[gray_image].astype(np.float64)
def colorize(ori_img):
    """Convenience wrapper: colorize a single label map and return the RGB image."""
    # Cast to uint8 label indices, then run through a fresh Colorize instance.
    return Colorize()(ori_img.astype(np.uint8))
|
[
"614047311@qq.com"
] |
614047311@qq.com
|
be6d8f5c44955195923dcdcee4b4c0b69fd07ab1
|
edf06a2bbe5f2ac332e7c93a91b391b548d2caa3
|
/caso/config.py
|
0f06b5ea25793404583023ee7b11c239b5b91f4f
|
[
"Apache-2.0"
] |
permissive
|
enolfc/caso
|
b3fcb8490491f94b73ff23f516426f7bf257b20f
|
22711ca71de4dcd99c231d3220005e0faee9b80d
|
refs/heads/master
| 2023-08-05T09:57:55.633603
| 2022-03-21T11:28:09
| 2022-03-24T15:46:48
| 27,259,205
| 0
| 0
|
Apache-2.0
| 2022-03-17T13:13:20
| 2014-11-28T09:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2014 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import warnings
from oslo_config import cfg
from oslo_log import log
import caso
# Route warnings.warn() through the logging system, and make
# DeprecationWarning visible (Python hides it by default).
logging.captureWarnings(True)
warnings.simplefilter("default", DeprecationWarning)
# Register oslo.log's options on the global CONF before parse_args() runs.
log.register_options(cfg.CONF)
def parse_args(argv, default_config_files=None):
    """Parse command-line options and config files via oslo.config.

    :param argv: full argument vector; argv[0] (the program name) is skipped.
    :param default_config_files: optional list of config file paths used when
        no --config-file option is supplied.
    """
    cfg.CONF(argv[1:],
             project='caso',
             version=caso.__version__,
             default_config_files=default_config_files)
|
[
"aloga@ifca.unican.es"
] |
aloga@ifca.unican.es
|
827d277eb2c6b6355e2ed92d4b2f89b51345f449
|
d31d744f62c09cb298022f42bcaf9de03ad9791c
|
/model-analysis/tensorflow_model_analysis/extractors/__init__.py
|
a06399b7fca2700ab4b4b86ce7286df7ba755c01
|
[] |
no_license
|
yuhuofei/TensorFlow-1
|
b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0
|
36eb6994d36674604973a06159e73187087f51c6
|
refs/heads/master
| 2023-02-22T13:57:28.886086
| 2021-01-26T14:18:18
| 2021-01-26T14:18:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module for TensorFlow Model Analysis extractors."""
from tensorflow_model_analysis.extractors import legacy_meta_feature_extractor as meta_feature_extractor
from tensorflow_model_analysis.extractors.batched_input_extractor import BatchedInputExtractor
from tensorflow_model_analysis.extractors.extractor import Extractor
from tensorflow_model_analysis.extractors.extractor import Filter
from tensorflow_model_analysis.extractors.legacy_feature_extractor import FeatureExtractor
from tensorflow_model_analysis.extractors.legacy_input_extractor import InputExtractor
from tensorflow_model_analysis.extractors.legacy_predict_extractor import PredictExtractor
from tensorflow_model_analysis.extractors.slice_key_extractor import SLICE_KEY_EXTRACTOR_STAGE_NAME
from tensorflow_model_analysis.extractors.slice_key_extractor import SliceKeyExtractor
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
bd1d6657e2a5c8b6d49190039ae96a131706ac70
|
99b062cb9f5f3ff10c9f1fa00e43f6e8151a43a6
|
/algorithm/AD/장기.py
|
4e1e8a0d1bdb348bfe634238d4aec62f57fbc256
|
[] |
no_license
|
HSx3/TIL
|
92acc90758015c2e31660617bd927f7f100f5f64
|
981c9aaaf09c930d980205f68a28f2fc8006efcb
|
refs/heads/master
| 2020-04-11T21:13:36.239246
| 2019-05-08T08:18:03
| 2019-05-08T08:18:03
| 162,099,042
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
import sys
sys.stdin = open("장기_input.txt")
def bfs():
    """Breadth-first search of knight ("horse") moves from (R, C) to (S, K).

    Reads the module globals N, M (board size), R, C (start), S, K (target)
    and mutates `data` (visited board) in place.  Returns the minimum number
    of moves, or None if the target is unreachable.
    """
    from collections import deque  # O(1) popleft vs list.pop(0)'s O(n) shift

    que = deque()
    que.append((R, C, 0))
    data[R][C] = 1  # mark the start square visited
    # The eight knight-move offsets (row, col).
    dr = [-2, -2, -1, 1, 2, 2, 1, -1]
    dc = [-1, 1, 2, 2, 1, -1, -2, -2]
    while que:
        r, c, turn = que.popleft()
        if r == S and c == K:
            return turn
        for i in range(8):
            nr = r + dr[i]
            nc = c + dc[i]
            if nr < 0 or nr >= N or nc < 0 or nc >= M:
                continue  # off the board
            if data[nr][nc] == 1:
                continue  # already visited
            data[nr][nc] = 1
            que.append((nr, nc, turn + 1))
# Board size: N rows, M columns.
N, M = map(int, input().split())
# Start (R, C) and target (S, K), given 1-based; converted to 0-based below.
R, C, S, K = map(int, input().split())
R -= 1
C -= 1
S -= 1
K -= 1
# Visited matrix shared with bfs(): 0 = unvisited, 1 = visited.
data = [[0 for _ in range(M)] for _ in range(N)]
print(bfs())
|
[
"hs.ssafy@gmail.com"
] |
hs.ssafy@gmail.com
|
164b9f7d0ee11a4e314b06179056de3565e0c3d7
|
b5bde703700ccf5b575f2382d357c0d2f5bd306c
|
/code/.metadata/.plugins/org.eclipse.core.resources/.history/7e/90dd27476cf30014128189d707139bfe
|
9f9953b39fdd1318ccfa7bad7921046b4f189881
|
[] |
no_license
|
lordwarlock/IRFinalProj
|
6a4a4a8d880f27fcd38054125c5e695c179863d6
|
cc2b3003fb41a63cb85c84bbdf13c20d8a7e1cba
|
refs/heads/master
| 2016-08-07T08:52:46.564262
| 2015-05-06T21:48:42
| 2015-05-06T21:48:42
| 33,019,191
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,989
|
#!/usr/bin/python
'''
Defines the general behavior that can be used by all cgi search modules.
'''
import unicodedata
import cgi
import cgitb
cgitb.enable()
def receive_data():
    '''
    Receive and return the cgi data from web-page.
    @return: the cgi form data from the corresponding web-page
    '''
    # Emit the HTTP header first (Python 2 print statement), then parse the
    # submitted form fields.
    print 'Content-Type: text/html\n\n'
    data = cgi.FieldStorage()
    return data
def find_search_result(rst_list, process_each_search_result):
    '''
    According to the search results list, and search result processing method,
    return a search results html string.
    @param rst_list: the search results list
    @param process_each_search_result: the method of processing each search result
    @return: the search result html string
    '''
    # If search miss
    if len(rst_list) == 0:
        return 'Search miss!<br>\n'
    rst_string = 'Total search hits number: ' + str(len(rst_list)) + '<br><br>\n'
    # Print out top 10 search hits, laid out two per "blog-top" row.
    for i in range(0, 10):
        # BUGFIX: was `len(rst)` — `rst` is undefined (NameError on any hit).
        if i >= len(rst_list):
            break
        if i % 2 == 0:
            rst_string += '<div class="blog-top">\n'
        # BUGFIX: was `search_result +=` — an undefined accumulator; every
        # rendered hit must be appended to rst_string, the value returned.
        rst_string += process_each_search_result(rst_list[i], i + 1)
        if i % 2 == 1:
            rst_string += '''
            <div class="clear"></div>
        </div>
        '''
    return rst_string
def html_file_top():
    '''
    Return a html string that corresponds to the search result page's header.
    @return: a html string that corresponds to the search result page's header
    '''
    # Static page header: loads the site CSS/JS and the logo banner; the
    # result body built by find_search_result() is injected right after this.
    return '''
    <html>
    <head>
    <title>Search Result</title>
    <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
    <link href="css/style.css" rel="stylesheet" type="text/css" media="all" />
    <link href='http://fonts.googleapis.com/css?family=Raleway' rel='stylesheet' type='text/css'>
    <script src="js/jquery.min.js"></script>
    </head>
    <body>
    <div class="index-banner1">
    <div class="header-top">
    <div class="wrap">
    <div class="logo">
    <a href="home.html"><img src="images/another_search.png" alt=""/></a>
    </div>
    <div class="clear"></div>
    </div>
    </div>
    </div>
    <div class="main">
    <div class="wrap">
    <div class="abstract">
    '''
def html_file_bottom():
    '''
    Return a html string that corresponds to the search result page's footer.
    @return: a html string that corresponds to the search result page's footer
    '''
    # Closes only <body>/<html>; the divs opened in html_file_top() are left
    # to the browser's lenient parser (kept as-is for byte-compatible output).
    return ''' </body>
    </html>
    '''
def write_and_jump(rst_html_str):
    '''
    Write the search result html string into ./soccer_search/result.html file,
    then jump current web-page into the result page (http://localhost:8000/soccer_search/result.html)
    @param rst_html_str: the search result html string
    '''
    # Write the processed search result html string into ./soccer_search/result.html file
    with open('./soccer_search/result.html', 'w') as html_file:
        html_file.write(html_file_top())
        html_file.write(rst_html_str)
        html_file.write(html_file_bottom())
    # Jump current web-page into the result page: emit a meta-refresh page
    # (Python 2 print statement) that redirects the browser to the result.
    print '''
    <html>
    <meta http-equiv="refresh" content="0.1;url=http://localhost:8000/soccer_search/result.html">
    </html>
    '''
|
[
"apple@Junchao.local"
] |
apple@Junchao.local
|
|
54d43d884097f994f64480f38c5b51fee9295850
|
a6476a929a361a9fcd0f1c23635d24554032000f
|
/horizon/horizon/dashboards/nova/images_and_snapshots/snapshots/forms.py
|
1e91fb12ce2f14d5044b9194607ef23c81bed843
|
[
"Apache-2.0"
] |
permissive
|
ehazlett/horizon
|
976eba79aed5390b98926389c8df29bbbc8d657b
|
993cc3bcd8e47a823733af5756fcb0f42cc4c703
|
refs/heads/master
| 2020-12-25T12:47:48.879504
| 2012-01-06T20:56:27
| 2012-01-06T20:56:27
| 3,123,162
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from openstackx.api import exceptions as api_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class CreateSnapshot(forms.SelfHandlingForm):
    """Form that snapshots a running Nova instance.

    The tenant and instance ids are pre-filled by the view (hidden/readonly);
    only the snapshot name is user-editable.  NOTE: this module uses Python 2
    syntax (``except Exc, e``).
    """
    tenant_id = forms.CharField(widget=forms.HiddenInput())
    instance_id = forms.CharField(widget=forms.TextInput(
        attrs={'readonly': 'readonly'}))
    name = forms.CharField(max_length="20", label=_("Snapshot Name"))

    def handle(self, request, data):
        """Create the snapshot and redirect to the snapshot index; on API
        failure, flash an error and redirect back to the current page."""
        try:
            LOG.info('Creating snapshot "%s"' % data['name'])
            snapshot = api.snapshot_create(request,
                                           data['instance_id'],
                                           data['name'])
            # Re-fetch the instance only to show its name in the message.
            instance = api.server_get(request, data['instance_id'])
            messages.info(request,
                          _('Snapshot "%(name)s" created for instance "%(inst)s"') %
                          {"name": data['name'], "inst": instance.name})
            return shortcuts.redirect('horizon:nova:images_and_snapshots'
                                      ':snapshots:index')
        except api_exceptions.ApiException, e:
            msg = _('Error Creating Snapshot: %s') % e.message
            LOG.exception(msg)
            messages.error(request, msg)
            return shortcuts.redirect(request.build_absolute_uri())
|
[
"gabriel@strikeawe.com"
] |
gabriel@strikeawe.com
|
339b4951be14fbb332d93723eb4ca888cccd60f9
|
c0075f31ff48142a05d92f11840229beee09f697
|
/tests/plugins/test_googledrive.py
|
340eb364e9161bfe2aa709b277873eebea29254f
|
[
"BSD-Source-Code",
"BSD-2-Clause"
] |
permissive
|
beardypig/streamlink
|
c1d44605ced0c924257b4813649acb406b035cb8
|
54c25e49a45a5545d2d9a545320cd2034ea41cd3
|
refs/heads/master
| 2021-12-12T04:31:02.955852
| 2020-11-10T06:18:33
| 2020-11-10T06:39:46
| 70,149,227
| 5
| 2
|
BSD-2-Clause
| 2019-08-08T11:56:37
| 2016-10-06T11:52:09
|
Python
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
import unittest
from streamlink.plugins.googledrive import GoogleDocs
class TestPluginGoogleDocs(unittest.TestCase):
    """URL-matching tests for the GoogleDocs plugin."""

    def test_can_handle_url(self):
        # URLs the plugin must claim.
        positive = (
            'https://drive.google.com/file/d/123123/preview?start=1',
        )
        for candidate in positive:
            self.assertTrue(GoogleDocs.can_handle_url(candidate))

    def test_can_handle_url_negative(self):
        # URLs the plugin must reject.
        negative = (
            'https://example.com/index.html',
        )
        for candidate in negative:
            self.assertFalse(GoogleDocs.can_handle_url(candidate))
|
[
"gravyboat@users.noreply.github.com"
] |
gravyboat@users.noreply.github.com
|
3435c887badc00f8d36cb10f18293efc83d2474a
|
2e6f37e664d2cc85d0c704f20de05b2cae86771d
|
/dataloader/image_list.py
|
64e7b0334e72a01f88ae9d4278fd9d20d06d2bef
|
[
"MIT"
] |
permissive
|
LEOGML/cv_template
|
5bee5e43efb490649f63a7c4e1b77e62a3e1d948
|
c1a87465f0aeb79dab63b0cae88861a6282c045c
|
refs/heads/master
| 2023-01-30T21:32:38.240103
| 2020-12-15T09:39:14
| 2020-12-15T09:39:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,122
|
py
|
# encoding=utf-8
import pdb
import os
import torch.utils.data.dataset as dataset
import misc_utils as utils
import random
import numpy as np
import cv2
from dataloader.transforms.custom_transform import read_image
class ListTrainValDataset(dataset.Dataset):
    """Paired image/label training dataset driven by a text list file.

    Each line of ``file_list`` holds an input image path and a ground-truth
    path separated by whitespace.  ``transforms`` is a callable applied to
    both images (albumentations-style keyword interface); ``max_size``
    optionally caps the reported dataset length.

    Example:
        train_dataset = ListTrainValDataset('train.txt', transforms)
        for data in train_dataset:
            input, label = data['input'], data['label']
    """

    def __init__(self, file_list, transforms, max_size=None):
        self.im_names = []
        self.labels = []
        with open(file_list, 'r') as handle:
            for raw_line in handle.readlines():
                image_path, label_path = raw_line.rstrip('\n').split()
                self.im_names.append(image_path.strip())
                self.labels.append(label_path.strip())
        self.transforms = transforms
        self.max_size = max_size

    def __getitem__(self, index):
        """Return {'input', 'label', 'path'} for the sample at *index*."""
        image = read_image(self.im_names[index])
        target = read_image(self.labels[index])
        transformed = self.transforms(**{
            'image': image,
            'gt': target,
        })
        return {
            'input': transformed['image'],
            'label': transformed['gt'],
            'path': self.im_names[index],
        }

    def __len__(self):
        total = len(self.im_names)
        return total if self.max_size is None else min(self.max_size, total)
class ListTestDataset(dataset.Dataset):
    """Inference-only dataset: a text list file with one image path per line.

    ``transforms`` is a callable applied to the image (albumentations-style
    keyword interface); ``max_size`` optionally caps the dataset length.
    """

    def __init__(self, file_list, transforms, max_size=None):
        self.im_names = []
        with open(file_list, 'r') as f:
            lines = f.readlines()
            for line in lines:
                line = line.rstrip('\n')
                img = line
                self.im_names.append(img)
        self.transforms = transforms
        self.max_size = max_size

    def __getitem__(self, index):
        """Return {'input', 'path'} for the sample at *index*."""
        # BUGFIX: the original called Image.open(...), but PIL's Image is
        # never imported in this module (NameError at runtime).  Use the
        # same read_image() helper as ListTrainValDataset so both datasets
        # feed the transform pipeline identically.
        input = read_image(self.im_names[index])
        # No ground truth at test time: feed the input as a dummy 'gt' so
        # the shared transform signature is satisfied.
        sample = self.transforms(**{
            'image': input,
            'gt': input,
        })
        sample = {
            'input': sample['image'],
            'path': self.im_names[index],
        }
        return sample

    def __len__(self):
        if self.max_size is not None:
            return min(self.max_size, len(self.im_names))
        return len(self.im_names)
|
[
"523131316@qq.com"
] |
523131316@qq.com
|
3f1c49d72fa189356632d260c761d1405c531bba
|
53309442fbf23b02b9f8181a58e5e988f7c6e8f2
|
/dlk/python/dlk/scripts/pylib/nnlib.py
|
3481d35e730d0618d078acc297ad6c1427d29d78
|
[
"Apache-2.0"
] |
permissive
|
serihiro/blueoil
|
a12baa1224d2a7056de14e74bceebe7f80e30de8
|
e538a08cb149c6f630263905819cc8c53a0a6081
|
refs/heads/master
| 2020-07-25T11:53:48.940466
| 2019-09-12T06:38:12
| 2019-09-12T06:38:12
| 208,280,175
| 0
| 0
|
Apache-2.0
| 2019-09-13T14:22:40
| 2019-09-13T14:22:36
| null |
UTF-8
|
Python
| false
| false
| 3,312
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import ctypes as ct
import numpy as np
from numpy.ctypeslib import ndpointer
class NNLib(object):
    """ctypes wrapper for a compiled DLK network shared library.

    Declares the C signatures of the ``network_*`` entry points, owns an
    opaque network handle, and moves tensors across the FFI boundary as
    C-contiguous arrays (float32 data, int32 shapes).
    """

    def __init__(self):
        # self.lib: the loaded ctypes CDLL; self.nnlib: opaque network
        # handle (void*) returned by network_create().
        self.lib = None
        self.nnlib = None

    def load(self, libpath):
        """Load the shared library at *libpath*, declare argument/return
        types for every entry point, and create a network instance.
        Always returns True."""
        self.lib = ct.cdll.LoadLibrary(libpath)

        self.lib.network_create.argtypes = []
        self.lib.network_create.restype = ct.c_void_p

        self.lib.network_init.argtypes = [ct.c_void_p]
        self.lib.network_init.restype = ct.c_bool

        self.lib.network_delete.argtypes = [ct.c_void_p]
        self.lib.network_delete.restype = None

        self.lib.network_get_input_rank.argtypes = [ct.c_void_p]
        self.lib.network_get_input_rank.restype = ct.c_int

        self.lib.network_get_output_rank.argtypes = [ct.c_void_p]
        self.lib.network_get_output_rank.restype = ct.c_int

        # Shape getters fill a caller-provided contiguous int32 buffer.
        self.lib.network_get_input_shape.argtypes = [ct.c_void_p, ndpointer(ct.c_int32, flags="C_CONTIGUOUS")]
        self.lib.network_get_input_shape.restype = None

        self.lib.network_get_output_shape.argtypes = [ct.c_void_p, ndpointer(ct.c_int32, flags="C_CONTIGUOUS")]
        self.lib.network_get_output_shape.restype = None

        # run(handle, input_buffer, output_buffer) — both float32 contiguous.
        self.lib.network_run.argtypes = [
            ct.c_void_p,
            ndpointer(
                ct.c_float,
                flags="C_CONTIGUOUS"),
            ndpointer(
                ct.c_float,
                flags="C_CONTIGUOUS"),
        ]
        self.lib.network_run.restype = None

        self.nnlib = self.lib.network_create()

        return True

    def init(self):
        """Initialize the created network; returns the C-side bool result."""
        return self.lib.network_init(self.nnlib)

    def delete(self):
        """Release the native network and drop both handles (idempotent)."""
        if self.nnlib:
            self.lib.network_delete(self.nnlib)
            self.nnlib = None
            self.lib = None

    def __del__(self):
        # Ensure the native side is released even without an explicit delete().
        self.delete()

    def get_input_rank(self):
        """Number of dimensions of the network's input tensor."""
        return self.lib.network_get_input_rank(self.nnlib)

    def get_output_rank(self):
        """Number of dimensions of the network's output tensor."""
        return self.lib.network_get_output_rank(self.nnlib)

    def get_input_shape(self):
        """Input shape as a tuple; buffer filled in place by the C side."""
        r = self.get_input_rank()
        s = np.zeros(r, np.int32)
        self.lib.network_get_input_shape(self.nnlib, s)
        return tuple(s)

    def get_output_shape(self):
        """Output shape as a tuple; buffer filled in place by the C side."""
        r = self.get_output_rank()
        s = np.zeros(r, np.int32)
        self.lib.network_get_output_shape(self.nnlib, s)
        return tuple(s)

    def run(self, tensor):
        """Run inference: flatten *tensor* to float32, invoke the native
        network, and return a float32 array shaped per get_output_shape()."""
        input = tensor.flatten().astype(np.float32)
        output = np.zeros((self.get_output_shape()), np.float32)
        self.lib.network_run(
            self.nnlib,
            input,
            output)
        return output
|
[
"matsuda@leapmind.io"
] |
matsuda@leapmind.io
|
44b803a1e237f3e47252977e2bb7d9fe4553a3ca
|
d39af45744a6220d30895126f2fc531e4d9ef2af
|
/tests/plugin2.py
|
5cb8fbb6f23b82d34c8a17108cb5644f4fb3479c
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
hugovk/coveragepy
|
dce0e11e3244cd9f79d0c5a432dac7a7cce74b69
|
2e09055737aaa7a4c3d61bd1cb700ef528827036
|
refs/heads/master
| 2023-06-27T08:39:02.120975
| 2023-06-07T10:28:18
| 2023-06-07T10:28:18
| 19,588,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,293
|
py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""A file tracer plugin for test_plugins.py to import."""
from __future__ import annotations
import os.path
from types import FrameType
from typing import Any, Optional, Set, Tuple
from coverage import CoveragePlugin, FileReporter, FileTracer
from coverage.plugin_support import Plugins
from coverage.types import TLineNo
try:
import third.render # pylint: disable=unused-import
except ImportError:
# This plugin is used in a few tests. One of them has the third.render
# module, but most don't. We need to import it but not use it, so just
# try importing it and it's OK if the module doesn't exist.
pass
class Plugin(CoveragePlugin):
"""A file tracer plugin for testing."""
def file_tracer(self, filename: str) -> Optional[FileTracer]:
if "render.py" in filename:
return RenderFileTracer()
return None
def file_reporter(self, filename: str) -> FileReporter:
return MyFileReporter(filename)
class RenderFileTracer(FileTracer):
"""A FileTracer using information from the caller."""
def has_dynamic_source_filename(self) -> bool:
return True
def dynamic_source_filename(
self,
filename: str,
frame: FrameType,
) -> Optional[str]:
if frame.f_code.co_name != "render":
return None
source_filename: str = os.path.abspath(frame.f_locals['filename'])
return source_filename
def line_number_range(self, frame: FrameType) -> Tuple[TLineNo, TLineNo]:
lineno = frame.f_locals['linenum']
return lineno, lineno+1
class MyFileReporter(FileReporter):
"""A goofy file reporter."""
def lines(self) -> Set[TLineNo]:
# Goofy test arrangement: claim that the file has as many lines as the
# number in its name.
num = os.path.basename(self.filename).split(".")[0].split("_")[1]
return set(range(1, int(num)+1))
def coverage_init(
reg: Plugins,
options: Any, # pylint: disable=unused-argument
) -> None:
"""Called by coverage to initialize the plugins here."""
reg.add_file_tracer(Plugin())
|
[
"ned@nedbatchelder.com"
] |
ned@nedbatchelder.com
|
931e0759257b0d996fd365675e052b85bb3061bd
|
97af8fc69f99073f000ef8da0256c8dcc4b62c5c
|
/graph/migrations/0001_initial.py
|
4b39b43a2ba9aca245c6501aa1888e335177d984
|
[] |
no_license
|
belal-bh/django-mptt-example
|
6af4525de881780688e26b7017013e8b8640860e
|
e341b65af32fbda2fc7f8f04192ca32f5d30d819
|
refs/heads/main
| 2023-03-15T21:44:52.678226
| 2021-03-02T13:40:38
| 2021-03-02T13:40:38
| 320,348,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
# Generated by Django 3.1.4 on 2020-12-04 18:52
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Node',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=30, unique=True)),
('name', models.CharField(max_length=255)),
('is_verified', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=False)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('connection', models.ManyToManyField(related_name='_node_connection_+', to='graph.Node')),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='graph.node')),
],
options={
'abstract': False,
},
),
]
|
[
"bh.pro.pust@gmail.com"
] |
bh.pro.pust@gmail.com
|
84f033145a45d5d825a7c732f5c3c30954b010cc
|
576cc83449e10fd3f98281970c46016ea7a5aea2
|
/OpenCV预处理/局部自适应阈值处理inv.py
|
154af1ae5f36d50bf61283ada12b9c43f3c9eb18
|
[] |
no_license
|
HotView/PycharmProjects
|
215ab9edd341e3293daebcf86d97537f8cd28d75
|
61393fe5ba781a8c1216a5cbe7e0d06149a10190
|
refs/heads/master
| 2020-06-02T07:41:53.608742
| 2019-11-13T08:31:57
| 2019-11-13T08:31:57
| 191,085,178
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
# 目标区域为偏暗色,即灰度值最小的地方为感兴趣区
import cv2
import numpy as np
def Nothing(val):
size = cv2.getTrackbarPos("size","gray")
param = cv2.getTrackbarPos("param","gray")
thresh = cv2.adaptiveThreshold(gray,255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,2*size+1, param)
cv2.imshow("thresh", thresh)
img = cv2.imread("image/hand01.jpg")
img = cv2.GaussianBlur(img,(5,5),1)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#cv2.namedWindow("thresh")
cv2.namedWindow("gray")
cv2.createTrackbar("size","gray",0,300,Nothing)
cv2.createTrackbar("param","gray",0,100,Nothing)
cv2.imshow("gray",gray)
cv2.waitKey(0)
|
[
"864773190@qq.com"
] |
864773190@qq.com
|
3bb894282823a496a43313fedc66a4b3f100aa8b
|
49d416e5c94540ba19ce1218dd253158b9f1c37c
|
/src/allennlp_plugins/__init__.py
|
091c329d91186498e95efe351648d91fe8919931
|
[
"Apache-2.0"
] |
permissive
|
j6mes/eacl2021-debias-finetuning
|
b05ba45508cef45e96cdf78d2182fe0a6edb541c
|
f3773c4a608c042c132bfe54e7cb63b142291c93
|
refs/heads/main
| 2023-03-26T01:39:11.674216
| 2021-03-27T21:35:52
| 2021-03-27T21:35:52
| 351,411,446
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from debias_finetuning.commands.finetune_l2 import *
from debias_finetuning.commands.finetune_ewc import *
from debias_finetuning.commands.evaluate_with_overwrite import *
|
[
"james@jthorne.co.uk"
] |
james@jthorne.co.uk
|
13b72aef7c8e90f7fff0839b4af94da5347f0931
|
c0cbcf1d1b42495fdb70ad62d3bb954be2b0c322
|
/learn/FileIO.py
|
d480471d13f9cbe0d21948a8307bd74da1ffd13e
|
[] |
no_license
|
pranjalparmar/Learn-Python
|
98ec11e9cab3d29d5e1e176e4b9ec3f4e232e78e
|
c377f8c0eca791b43ae55fae797e4f3fb6a3bcfc
|
refs/heads/main
| 2023-02-28T17:42:53.271860
| 2021-02-06T11:18:45
| 2021-02-06T11:18:45
| 336,462,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
f = open("pranjal.txt","rt")
print(f.readlines())
# print(f.readline())
# print(f.readline())
# content = (f.readline())
# content = (f.readline())
# print("1",content)
# for line in f:
# print(line,end="")
f.close()
|
[
"noreply@github.com"
] |
pranjalparmar.noreply@github.com
|
f1407cc95fbc90c393aa118b32271d74fc4e8720
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2690/60678/248819.py
|
09b1630a9d7bb564180ece9ce7e5ce73c668ba77
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
times = int(input())
for loopTimes in range(0, times):
input()
stringList = input().split()
stringM = stringList[0]
stringS = stringList[1]
stringTest = stringM + ' ' + stringS
listM = list(stringM)
listS = list(stringS)
for i in range(0, len(stringS)):
while stringM.find(stringS[i]) != -1:
listM[stringM.find(stringS[i])] = '1'
stringM = ''.join(listM)
for i in range(0, len(listM)):
if listM[i] != '1':
listM[i] = '0'
stringM = ''.join(listM)
stringM = stringM.split('0')
index = 0
while index < len(stringM):
if stringM[index] == '':
del stringM[index]
index -= 1
index += 1
# print(len(stringM))
if len(stringM) == 4:
print(len(stringM))
else:
print(stringTest)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
2304ae329181fdc87d288da984a9d02c5739dcb5
|
ace7e98719c756cff4e4baf7c92e546cbc0b92ca
|
/LintCode/6-Linked List/2.covertSortedListToBalancedBST.py
|
a098c7d34acc77b54dd52275e5165369ed6a2091
|
[] |
no_license
|
armsky/OnlineJudge
|
f4159326c92a794695cca8a162280fef32f95a2a
|
c658b78c920aa94c25b3d932cd7e46c0df82b19a
|
refs/heads/master
| 2020-04-15T01:21:18.158217
| 2015-12-11T03:05:28
| 2015-12-11T03:05:28
| 21,989,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,874
|
py
|
"""
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
Example
2
1->2->3 => / \
1 3
"""
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
cur = None
def sortedListToBST(self, head):
global cur
if not head:
return None
size = self.getLen(head)
cur = head
return self.build(size)
def getLen(self, a):
n = 0
while a:
a = a.next
n += 1
return n
def build(self, size):
global cur
if size <= 0:
return None
left = self.build(size/2)
root = TreeNode(cur.val)
cur = cur.next
right = self.build(size -1 -size/2)
root.left = left
root.right = right
return root
# O(n log n) time
# No need to keep a global variable current_node
def sortedListToBST2(self, head):
if not head:
return head
size = self.getLen(head)
return self.construct(head, size)
def construct(self, head, size):
if not head or size==0:
return None
root = self.getNode(size/2, head)
root.left = self.construct(head, size/2)
root.right = self.construct(self.getNode(size/2 + 1, head), size - size/2 -1)
return root
def getNode(self, n, head):
for i in range(n):
head = head.next
return head
so = Solution()
a = ListNode(1)
a.next = ListNode(2)
a.next.next = ListNode(3)
print so.sortedListToBST(a).val
|
[
"armsky1988@gmail.com"
] |
armsky1988@gmail.com
|
3e472ab7973b0dfe437944cf0b307c2745160fd3
|
f81c629865f0493500eaa2ab4e3337fd6603cf0c
|
/loqusdb/commands/cli.py
|
6bc3d75fe6e36e25ab05477e35b792c64c45b9f6
|
[] |
no_license
|
robinandeer/loqusdb
|
fd8a49e325ae36169e16025793156e0a978c6716
|
15ae953589bbe51b24e549c03986bf2ea0ef6b0e
|
refs/heads/master
| 2021-01-18T10:47:44.870986
| 2016-03-02T13:10:34
| 2016-03-02T13:10:34
| 52,794,952
| 0
| 0
| null | 2016-02-29T13:50:46
| 2016-02-29T13:50:46
| null |
UTF-8
|
Python
| false
| false
| 2,473
|
py
|
import click
from loqusdb.log import LEVELS, init_log
from loqusdb import logger, __version__
from loqusdb.plugins import MongoAdapter
@click.group()
@click.option('-db', '--database',
default='loqusdb',
show_default=True,
)
@click.option('-u', '--username',
type=str
)
@click.option('-p', '--password',
type=str
)
@click.option('-port', '--port',
default=27017,
show_default=True,
help='Specify the port where to look for the mongo database.'
)
@click.option('-h', '--host',
default='localhost',
show_default=True,
help='Specify the host where to look for the mongo database.'
)
@click.option('-b', '--backend',
default='mongo',
show_default=True,
type=click.Choice(['mongo',]),
help='Specify what backend to use.'
)
@click.option('-c', '--conn_host',
default='mongodb://',
show_default=True,
help='Used for testing.'
)
@click.option('-l', '--logfile',
type=click.Path(exists=False),
help=u"Path to log file. If none logging is "\
"printed to stderr."
)
@click.option('-v', '--verbose', count=True, default=1)
@click.version_option(__version__)
@click.pass_context
def cli(ctx, conn_host, database, username, password, port, host, verbose,
logfile, backend):
"""loqusdb: manage a local variant count database."""
# configure root logger to print to STDERR
loglevel = LEVELS.get(min(verbose,1), "INFO")
init_log(
logger = logger,
filename = logfile,
loglevel = loglevel
)
# mongo uri looks like:
#mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
uri = None
if username and password:
uri = "{0}{1}:{2}@{3}:{4}/{5}".format(
conn_host, username, password, host, port, database
)
logger.info('uri={0}'.format(uri))
adapter = MongoAdapter()
adapter.connect(
host=host,
port=port,
database=database,
uri=uri
)
ctx.obj = {}
ctx.obj['db'] = database
ctx.obj['user'] = username
ctx.obj['password'] = password
ctx.obj['port'] = port
ctx.obj['host'] = host
ctx.obj['adapter'] = adapter
|
[
"monsunas@gmail.com"
] |
monsunas@gmail.com
|
0f70d2f0a0efc9c22f33694d5afcb0cfafa6536a
|
3c92c3f633b613a62fb67476fd617e1140133880
|
/leetcode/1541. Minimum Insertions to Balance a Parentheses String.py
|
096c25dacba75e51f24aca72ee3872d79ab37936
|
[] |
no_license
|
cuiy0006/Algorithms
|
2787f36f8164ded5252a006f723b570c9091bee9
|
00fd1397b65c68a303fcf963db3e28cd35c1c003
|
refs/heads/master
| 2023-03-31T13:55:59.191857
| 2023-03-31T03:39:42
| 2023-03-31T03:39:42
| 75,001,651
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
class Solution:
def minInsertions(self, s: str) -> int:
left = 0
cnt = 0
i = 0
while i < len(s):
if s[i] == '(':
left += 1
i += 1
else:
if i == len(s) - 1 or s[i+1] != ')':
cnt += 1
i += 1
else:
i += 2
if left == 0:
cnt += 1
else:
left -= 1
return cnt + left * 2
|
[
"noreply@github.com"
] |
cuiy0006.noreply@github.com
|
0001c01bc8a101706f658bcd83d4b303d1d9be1c
|
5aa80aab7a75d76b0aa838bf8f74a276a12c876e
|
/src/config/device-manager/device_manager/ansible_base.py
|
a0ed6248cc8be3bd6bb180ff2f16a4e6610a3fbe
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
tungstenfabric/tf-controller
|
83b6d58afadb5697b540b5345711a5b2af90d201
|
f825fde287f4eb2089aba2225ca73eeab3888040
|
refs/heads/master
| 2023-08-28T02:56:27.329584
| 2023-08-20T12:15:38
| 2023-08-20T12:31:34
| 231,070,970
| 55
| 29
|
Apache-2.0
| 2023-07-23T01:38:17
| 2019-12-31T10:24:38
|
C++
|
UTF-8
|
Python
| false
| false
| 6,410
|
py
|
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation plugin base class for device config module.
The specific plugins should inherit from this class.
"""
import abc
from builtins import object
from builtins import str
from .imports import import_ansible_plugins
#
# Base Class for all plugins. pluigns must implement all abstract methods
#
class AnsibleBase(object):
_plugins = {}
class PluginError(Exception):
"""Exception class to indicate plugin error."""
def __init__(self, plugin_info):
"""Initialize the exception with plugin info."""
self.plugin_info = plugin_info
# end __init__
def __str__(self):
"""Provide plugin info in exception details."""
return "Ansible Plugin Error, Configuration = %s" % \
str(self.plugin_info)
# end __str__
# end PluginError
class PluginsRegistrationFailed(Exception):
"""Exception class to indicate plugin registration error."""
def __init__(self, exceptions):
"""Initialize the exception with nested exceptions."""
self.exceptions = exceptions
# end __init__
def __str__(self):
"""Provide details of nested exception in exception message."""
ex_mesg = "Plugin Registrations Failed:\n"
for ex in self.exceptions or []:
ex_mesg += ex + "\n"
return ex_mesg
# end __str__
# end PluginsRegistrationFailed
def __init__(self, logger):
"""Initialize the plugin."""
self._logger = logger
self.commit_stats = {
'last_commit_time': '',
'last_commit_duration': '',
'commit_status_message': '',
'total_commits_sent_since_up': 0,
}
self.initialize()
self.device_connect()
# end __init__
# instantiate a plugin dynamically
@classmethod
def plugin(cls, vendor, product, params, logger):
pr = params.get("physical_router")
name = str(pr.physical_router_role) + ":" + \
str(vendor) + ":" + str(product)
if pr.physical_router_role and vendor and product:
pconf = AnsibleBase._plugins.get(pr.physical_router_role)
if pconf:
logger.info(
"Found ansible plugin pr=%s, role/vendor/product=%s" %
(pr.uuid, name))
pconf = pconf[0] # for now one only
inst_cls = pconf.get('class')
return inst_cls(logger, params)
logger.warning(
"No ansible plugin pr=%s, role/vendor/product=%s" %
(pr.uuid, name))
return None
# end plugin
# validate plugin name
def verify_plugin(self, vendor, product, role):
return self.is_role_supported(role)
# end verify_plugin
# register all plugins with device manager
@classmethod
def register_plugins(cls):
# make sure modules are loaded
import_ansible_plugins()
# register plugins, find all leaf implementation classes derived from
# this class
subclasses = set()
work = [cls]
while work:
parent = work.pop()
if not parent.__subclasses__():
subclasses.add(parent)
continue
for child in parent.__subclasses__():
if child not in subclasses:
work.append(child)
# register all plugins,
# if there is any exception, continue to register all other plugins,
# finally throw one single exception to the caller
exceptions = []
for scls in subclasses or []:
try:
scls.register()
except AnsibleBase.PluginError as e:
exceptions.append(str(e))
if exceptions:
raise cls.PluginsRegistrationFailed(exceptions)
# end register_plugins
@classmethod
def register(cls, plugin_info):
if not plugin_info or not plugin_info.get("roles"):
raise AnsibleBase.PluginError(plugin_info)
for role in plugin_info.get("roles"):
AnsibleBase._plugins.setdefault(
role.lower(), []).append(plugin_info)
# end register
@classmethod
def is_role_supported(cls, role):
"""Check if plugin is capable of supporting role."""
return False
# end is_role_supported
@abc.abstractmethod
def plugin_init(self, is_delete=False):
"""Initialize plugin."""
# end plugin_init
@abc.abstractmethod
def initialize(self):
"""Initialize local data structures."""
# end initialize
def validate_device(self):
return True
# def validate_device
@abc.abstractmethod
def update(self, params):
"""Update plugin intialization params."""
# end update
def clear(self):
"""Clear connections and data structures."""
self.initialize()
self.device_disconnect()
# end clear
@abc.abstractmethod
def device_connect(self):
"""Initialize the device connection and get the handle."""
pass
# end device_connect
@abc.abstractmethod
def device_disconnect(self):
"""Delete the device connection and and reset the handle."""
pass
# end device_disconnect
@abc.abstractmethod
def retry(self):
"""Retry send conf or not."""
return False
# end retry
@abc.abstractmethod
def device_get(self, filters={}):
"""Retrieve configuration from device for given filter parameters."""
return {}
# end device_get
def device_get_config(self, filters={}):
"""Retrieve entire device current configuration."""
return {}
# end device_get_config
@abc.abstractmethod
def get_commit_stats(self):
"""Return Commit Statistics if any."""
return self.commit_stats
# end device_get
@abc.abstractmethod
def push_conf(self, feature_configs=None, is_delete=False):
"""Push config to device."""
return 0
# end push_conf
@abc.abstractmethod
def get_service_status(self, service_params={}):
"""Get service status for a given service."""
return {}
# end get_service_status
# end AnsibleBase
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
958e29edf8b1e663aabc6944aef2aae04ecbf3d5
|
f995860ad78fc266d04b03c3478c74e989d8b568
|
/PE/pe0493.py
|
5ea1aa17c923d1333d653be374eade36e150d859
|
[] |
no_license
|
196884/Python
|
edd0234fd72a40d7a0b3310776edcaa8bda74478
|
8dc2e7a32dd350227cde748600e713dc3eea3f4a
|
refs/heads/master
| 2016-09-06T19:26:19.860746
| 2015-11-09T00:09:23
| 2015-11-09T00:09:23
| 28,167,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
from mpmath import *
mp.dps = 30
def sortFn((a, b)):
return a
def evolvePos(c, i):
# c is the configuration
# i is the index chosen
r = list(c)
(ni, ki) = c[i]
f = ni * ki
if ki > 1:
r[i] = (ni, ki-1)
else:
r.pop(i)
if ni > 1:
n = len(r)
found = False
for i in range(0, n):
if r[i][0] == ni - 1:
r[i] = (r[i][0], r[i][1]+1)
found = True
if not found:
r.append((ni-1, 1))
r.sort(key = sortFn)
return (f, tuple(r))
def handlePick(d, total):
r = dict()
for c, proba in d.iteritems():
nc = len(c)
for i in range(0, nc):
(f, cb) = evolvePos(c, i)
thisProba = proba * mpf(f) / mpf(total)
prevProba = r.get(cb, mpf(0))
r[cb] = prevProba + thisProba
return r
def nbColors(c):
l = list(c)
(n, k) = l[-1]
if n == 10:
return 7 - k
else:
return 7
def solve():
# Bruteforcing it...
d = dict()
d[((9,1),(10,6))] = mpf(1)
total = 69
for k in range(0, 19):
d = handlePick(d, total)
total -= 1
r = mpf(0)
for c, p in d.iteritems():
n = nbColors(c)
r = r + mpf(n) * p
return r
if __name__ == "__main__":
result = solve()
print "Result: %s" % result
|
[
"regis.dupont+git@m4x.org"
] |
regis.dupont+git@m4x.org
|
9ad0fc7bf19e3d9004fa97afe0d0cfd173119ba0
|
ce4f7f8e9336b8bbf9cbfe147d922e37034ab6c3
|
/old/ABC152C.py
|
26f273585904a48a570ae025e08fea57f3a7bcb9
|
[] |
no_license
|
kussy-tessy/atcoder
|
5604919747242ee9740b9131bb6e168e96af0151
|
ee917fa5a5218d4a9e72f710d0d844e7c203f13b
|
refs/heads/master
| 2023-07-21T09:25:15.464881
| 2021-09-04T14:06:02
| 2021-09-04T14:06:02
| 311,221,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# print('input >>')
N = int(input())
P = list(map(int,(input().split())))
ans = 0
now = P[0]
for p in P:
if now >= p:
ans += 1
now = p
# print('-----output-----')
print(ans)
|
[
"teppei04285000@gmail.com"
] |
teppei04285000@gmail.com
|
d53121b2ae4fd928addc43c3fa35c1600044f7fe
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/common/dag.py
|
d553317dfc081b51702747ecdfc7fda8fb0ea527
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,995
|
py
|
"""DAG class."""
from collections import deque
from collections import OrderedDict
class DAG:
"""DAG."""
def __init__(self):
"""Init DAG."""
self.nodes = OrderedDict()
def add_node(self, node):
"""Add node."""
if node not in self.nodes:
self.nodes[node] = set()
def remove_node(self, node):
"""Remove node."""
if node in self.nodes:
self.nodes.pop(node)
for pre_node, nodes in iter(self.nodes.items()):
if node in nodes:
nodes.remove(node)
def add_edge(self, pre_node, node):
"""Add edge."""
if pre_node not in self.nodes or node not in self.nodes:
return
self.nodes[pre_node].add(node)
def remove_edge(self, pre_node, node):
"""Remove edge."""
if pre_node in self.nodes and node in self.nodes[pre_node]:
self.nodes[pre_node].remove(node)
def from_dict(self, dict_value):
"""Construct DAG from dict."""
self.nodes = OrderedDict()
for node in iter(dict_value.keys()):
self.add_node(node)
for pre_node, nodes in iter(dict_value.items()):
if not isinstance(nodes, list):
raise TypeError('dict values must be lists')
for node in nodes:
self.add_edge(pre_node, node)
def next_nodes(self, node):
"""Get all successor of the node."""
return list(self.nodes[node])
def pre_nodes(self, node):
"""Get all predecessor of the node."""
return [item for item in self.nodes if node in self.nodes[item]]
def topological_sort(self):
"""Topological sort."""
in_degree = {node: 0 for node in self.nodes}
out_degree = {node: 0 for node in self.nodes}
for node in self.nodes:
out_degree[node] = len(node)
for next_node in self.nodes[node]:
in_degree[next_node] += 1
ret = []
stack = deque()
for node in in_degree:
if in_degree[node] == 0:
stack.append(node)
while len(stack) > 0:
node = stack.pop()
for item in self.nodes[node]:
in_degree[item] -= 1
if in_degree[item] == 0:
stack.append(item)
ret.append(node)
if len(ret) != len(self.nodes):
raise ValueError("Not a directed acyclic graph")
return ret
def ind_nodes(self):
"""Independent nodes."""
in_degree = {node: 0 for node in self.nodes}
for node in self.nodes:
for next_node in self.nodes[node]:
in_degree[next_node] += 1
ret = set(node for node in self.nodes if in_degree[node] == 0)
return ret
def size(self):
"""Return the size of graph."""
return len(self.nodes)
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
349d0a7b86159e2b854df8311790ec362c606538
|
a561673adf29beb7939052b898dad5bf9167cefc
|
/sdk/python/lib/test/langhost/resource_thens/test_resource_thens.py
|
a970b60ef82197cc50194e0d28317a312639a605
|
[
"Apache-2.0"
] |
permissive
|
orionstudt/pulumi
|
50fd75d4ec7bb48646cd3c83198afcf4a556a5fa
|
7ef0b83c0cc7c4f9093e2a8fc0303e875d35c15c
|
refs/heads/master
| 2023-08-12T13:57:32.605402
| 2021-10-18T12:24:46
| 2021-10-18T12:24:46
| 312,097,288
| 0
| 1
|
Apache-2.0
| 2021-01-11T17:12:44
| 2020-11-11T21:43:03
| null |
UTF-8
|
Python
| false
| false
| 2,961
|
py
|
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
from ..util import LanghostTest
class ResourceThensTest(LanghostTest):
"""
Test that tests Pulumi's ability to track dependencies between resources.
ResourceA has an (unknown during preview) output property that ResourceB
depends on. In all cases, the SDK must inform the engine that ResourceB
depends on ResourceA. When not doing previews, ResourceB has a partial view
of ResourceA's properties.
"""
def test_resource_thens(self):
self.run_test(
program=path.join(self.base_path(), "resource_thens"),
expected_resource_count=2)
def register_resource(self, _ctx, _dry_run, ty, name, _resource, _dependencies, _parent, _custom, protect,
_provider, _property_deps, _delete_before_replace, _ignore_changes, _version, _import,
_replace_on_changes):
if ty == "test:index:ResourceA":
self.assertEqual(name, "resourceA")
self.assertDictEqual(_resource, {"inprop": 777, "inprop_2": 42})
urn = self.make_urn(ty, name)
res_id = ""
props = {}
if not _dry_run:
res_id = name
props["outprop"] = "output yeah"
return {
"urn": urn,
"id": res_id,
"object": props
}
if ty == "test:index:ResourceB":
self.assertEqual(name, "resourceB")
self.assertListEqual(_dependencies, ["test:index:ResourceA::resourceA"])
if _dry_run:
self.assertDictEqual(_resource, {
# other_in is unknown, so it is not in the dictionary.
# other_out is unknown, so it is not in the dictionary.
# other_id is also unknown so it is not in the dictionary
})
else:
self.assertDictEqual(_resource, {
"other_in": 777,
"other_out": "output yeah",
"other_id": "resourceA",
})
res_id = ""
if not _dry_run:
res_id = name
return {
"urn": self.make_urn(ty, name),
"id": res_id,
"object": {}
}
self.fail(f"unknown resource type: {ty}")
|
[
"noreply@github.com"
] |
orionstudt.noreply@github.com
|
d4d1407c5e94cdaedf63ccc88e1092cafd364240
|
d77e61d5a9eb08736d5c3621896a66ab970ccea6
|
/python/problems/array/remove_duplicates_in_place_sorted_array.py
|
1e9ee54b027ac37bddc5ee0063c85f820184fad4
|
[] |
no_license
|
santhosh-kumar/AlgorithmsAndDataStructures
|
edc1a296746e2d2b0e1d4c748d050fe12af7b65f
|
11f4d25cb211740514c119a60962d075a0817abd
|
refs/heads/master
| 2022-11-15T00:22:53.930170
| 2020-07-10T03:31:30
| 2020-07-10T03:31:30
| 269,263,401
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
"""
Remove Duplicates In-Place For a Sorted Array
Given a sorted array, the task is to remove the duplicate elements from the array.
Examples:
Input : arr[] = {2, 2, 2, 2, 2}
Output : arr[] = {2}
new size = 1
Input : arr[] = {1, 2, 2, 3, 4, 4, 4, 5, 5}
Output : arr[] = {1, 2, 3, 4, 5}
new size = 5
"""
from common.problem import Problem
class RemoveDuplicatesInPlaceSortedArray(Problem):
"""
RemoveDuplicatesInPlaceSortedArray
"""
PROBLEM_NAME = "RemoveDuplicatesInPlaceSortedArray"
def __init__(self, input_list):
"""RemoveDuplicatesInPlaceSortedArray
Args:
input_list: Contains a list of integers
Returns:
None
Raises:
None
"""
assert (len(input_list) > 0)
super().__init__(self.PROBLEM_NAME)
self.input_list = input_list
def solve(self):
"""Solve the problem
Note: The O(n) runtime and O(1) (space).
Args:
Returns:
integer
Raises:
None
"""
print("Solving {} problem ...".format(self.PROBLEM_NAME))
i = 0
while i < len(self.input_list):
j = i + 1
# iterate till we find the next non-duplicate and an increasing value
while j < len(self.input_list) and (
self.input_list[i] == self.input_list[j] or self.input_list[i] > self.input_list[j]):
j = j + 1
# swap with the next position if within the allowed size
if (i + 1) < len(self.input_list) and j < len(self.input_list):
self.input_list[i + 1], self.input_list[j] = self.input_list[j], self.input_list[i + 1]
else:
# we have reached the end w.r.t. j and hence return now
return i
i = i + 1
return i
|
[
"santhoshkumar.sunderrajan@gmail.com"
] |
santhoshkumar.sunderrajan@gmail.com
|
13e4c32ff331ce50e66711f5334464b084c2f06b
|
9a3430749300a93b34b20e37505c8b1c0f7a79cf
|
/fixrequests.py
|
bf485d6785d7ccdc18e2e9a35e473522715d4c5c
|
[] |
no_license
|
qyguo/makegridpacks
|
c127bcb83ea24bc1a6f06d7d5dce2eb4d5735367
|
cd4beb1e92dbec3f074305d07c15e2f10c7ae67c
|
refs/heads/master
| 2020-03-31T22:08:16.978758
| 2018-10-10T18:34:35
| 2018-10-10T18:34:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
#!/usr/bin/env python
import argparse
from makegridpacks import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", "-n", action="store_true", help="don't send anything to McM")
parser.add_argument("--times", "-t", action="store_true", help="get the times from McM for all requests")
args = parser.parse_args()
with RequestQueue() as queue:
for productionmode in "ggH", "VBF", "WplusH", "WminusH", "ZH", "ttH":
for decaymode in "4l", "2l2nu", "2l2q":
for mass in getmasses(productionmode, decaymode):
sample = POWHEGJHUGenMassScanMCSample(productionmode, decaymode, mass)
if (sample.needsupdate or args.times) and sample.prepid and os.path.exists(sample.cvmfstarball):
sample.gettimepereventfromMcM()
print sample
if sample.needsupdate and not args.dry_run:
queue.addrequest(sample, useprepid=True)
|
[
"jroskes1@jhu.edu"
] |
jroskes1@jhu.edu
|
e6440664549037faeeda37d40990091d4fdf3dbc
|
775f887ab0933c8bb9263febceb702974966bb48
|
/packages/pyright-internal/src/tests/samples/expressions8.py
|
76f1a96de2a800e729e324c34729664d0230e80d
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
isabella232/pyright
|
160a4d9ce366cb61946949f9d5aebe7457539c67
|
a192486099503353413e02078c41d0d82bd696e8
|
refs/heads/master
| 2023-03-13T05:04:51.852745
| 2021-03-03T07:51:18
| 2021-03-03T07:51:18
| 344,101,663
| 0
| 0
|
NOASSERTION
| 2021-03-03T11:24:10
| 2021-03-03T11:21:38
| null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# This sample verifies that binary expressions like "less than"
# work if the operands are constrained TypeVars.
from abc import abstractmethod
from typing import Protocol, TypeVar
_T = TypeVar("_T")
class ComparableTo(Protocol[_T]):
@abstractmethod
def __lt__(self, x: _T) -> bool:
pass
def custom_compare(a: ComparableTo[_T], b: _T) -> bool:
return a < b
custom_compare("first", "second")
custom_compare(3, 2)
# This should generate an error.
custom_compare(3, "hi")
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
36d9e40222f66557c42c81c7b1deadefa3382594
|
87130a19d9fa51d9b500d73ea9717ba16465f0f6
|
/backend/api/errors.py
|
d87230fd995b428781bf85436ec04c0686447947
|
[] |
no_license
|
toyscript/toyscript
|
f4f236a8d1941565e6e5ed86bbb6417db73e5e2f
|
ac31a8ccf0f77226d7def3c6cb2744c521a89ff9
|
refs/heads/main
| 2023-06-06T19:32:45.642996
| 2021-07-07T04:50:42
| 2021-07-07T04:50:42
| 360,021,820
| 1
| 6
| null | 2021-06-19T08:40:39
| 2021-04-21T03:32:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 185
|
py
|
class MovieDoesNotExist(Exception):
def __init__(self, message="해당 영화를 찾을 수 없습니다.", status=404):
self.message = message
self.status = status
|
[
"swamys@naver.com"
] |
swamys@naver.com
|
5312710fddbf6c8c78ed25f3ba26ec034c290fe6
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/program/database/data/CategoryDBAdapter.pyi
|
67dbfd06673607d5cb56cc5c8b3474cf3ffe7bce
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
pyi
|
import java.lang
class CategoryDBAdapter(object):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
258e40712331ad317f9ddc190c8e084e68f8b142
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4137/codes/1575_1334.py
|
4a02531f8dd05fe3a3c2859b82ad6825f61ec54d
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# Demonstration of Python's basic arithmetic operators.
print("Adicao: ", 5 + 3)
print("Subtracao: ", 10 - 2)
print("Multiplicacao: ", 2*4)
print("Divisao: ", 16/2)
# Bug fix: "Resto" (remainder) requires the modulo operator; 16//2 is
# floor division (8), while the remainder of 16/2 is 0.
print("Resto: ", 16 % 2)
print("Potenciacao: ", 2 ** 3)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
82c6cd39e11ca9c71e9a7af08bfa8d5283cb0013
|
4cacf8188446170e0b4a14b05021bbd595c4db53
|
/pyrolite/mineral/transform.py
|
051b6d30d068c1f55e1c32d90c780845f6c5592b
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
JustinGOSSES/pyrolite
|
2d145583344f79e8f935ed19fa00037d42969664
|
21eb5b28d9295625241b73b820fc8892b00fc6b0
|
refs/heads/master
| 2020-12-23T11:26:55.078871
| 2020-01-10T09:03:22
| 2020-01-10T09:03:22
| 237,136,389
| 1
| 0
|
NOASSERTION
| 2020-01-30T04:08:52
| 2020-01-30T04:08:51
| null |
UTF-8
|
Python
| false
| false
| 5,271
|
py
|
import pandas as pd
import numpy as np
import periodictable as pt
from ..util.pd import to_frame
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def formula_to_elemental(formula, weight=True):
    """Convert a periodictable.formulas.Formula to elemental composition.

    With ``weight=True`` the result is mass fractions; otherwise atomic
    (mole) fractions. Returns a dict keyed by element symbol.
    """
    parsed = pt.formula(formula)
    atom_counts = parsed.atoms
    if weight:
        total_mass = parsed.mass
        return {
            str(element): (count * element.mass) / total_mass
            for element, count in atom_counts.items()
        }
    total_atoms = sum(count for count in atom_counts.values())
    return {
        str(element): count / total_atoms
        for element, count in atom_counts.items()
    }
def merge_formulae(formulas):
    """
    Combine multiple formulae into one. Particularly useful for defining oxide mineral
    formulae.

    Parameters
    -----------
    formulas: iterable
        Iterable of multiple formulae to merge into a single larger molecular formulae.
    """
    combined = pt.formula("")
    for entry in formulas:
        combined += pt.formula(entry)
    return combined
def parse_composition(composition):
    """
    Parse a composition to provide an ionic elemental version in the form of a
    pandas.Series. Currently accepts pandas.Series, periodictable.formulas.Formula
    and structures which will directly convert to pandas.Series (list of tuples, dict).

    Parameters
    -----------
    composition : :class:`pandas.Series` | :class:`periodictable.formulas.Formula`
        Formulation of composition to parse.
    """
    # Returns None implicitly when composition is None.
    if composition is not None:
        if isinstance(composition, pd.Series):
            # convert to molecular oxides, then to formula, then to wt% elemental
            components = [pt.formula(c) for c in composition.index]
            values = composition.values
            # Scale each component formula by moles (value / molar mass).
            formula = merge_formulae(
                [v / c.mass * c for v, c in zip(values, components)]
            )
            return pd.Series(formula_to_elemental(formula))
        elif isinstance(composition, pt.formulas.Formula):
            return pd.Series(formula_to_elemental(composition))
        else:
            # Anything Series-convertible (dict, list of tuples): recurse once.
            return parse_composition(pd.Series(composition))
def recalc_cations(
    df,
    # NOTE(review): mutable default argument; safe here only because the list
    # is never mutated — consider Fe_species=None with an in-body default.
    ideal_cations=4,
    ideal_oxygens=6,
    Fe_species=["FeO", "Fe", "Fe2O3"],
    oxygen_constrained=False,
):
    """
    Recalculate a composition to a.p.f.u. (atoms per formula unit).

    Normalises either to `ideal_cations` cations or, when multiple iron
    species constrain the oxygen budget, to `ideal_oxygens` oxygens.
    """
    assert ideal_cations is not None or ideal_oxygens is not None
    # if Fe2O3 and FeO are specified, calculate based on oxygen
    moles = to_frame(df)
    # Convert mass amounts to moles by dividing by each component's molar mass.
    moles = moles.div([pt.formula(c).mass for c in moles.columns])
    moles = moles.where(~np.isclose(moles, 0.0), np.nan)
    # determine whether oxygen is an open or closed system
    count_iron_species = np.array([i in moles.columns for i in Fe_species]).sum()
    oxygen_constrained = oxygen_constrained
    if not oxygen_constrained:
        if count_iron_species > 1:  # check that only one is defined
            # Oxygen is constrained only if more than one Fe species is
            # actually populated (non-null) for the sample.
            oxygen_constrained = (
                count_iron_species
                - pd.isnull(moles.loc[:, Fe_species]).all(axis=1).sum()
            ) > 1
            if oxygen_constrained:
                logger.info("Multiple iron species defined. Calculating using oxygen.")
            else:
                logger.info("Single iron species defined. Calculating using cations.")
    components = moles.columns
    # Oxide components (e.g. SiO2) have more than one atom in the formula.
    as_oxides = len(list(pt.formula(components[0]).atoms)) > 1
    schema = []
    # if oxygen_constrained:  # need to specifically separate Fe2 and Fe3
    if as_oxides:
        parts = [pt.formula(c).atoms for c in components]
        for p in parts:
            oxygens = p[pt.O]
            other_components = [i for i in list(p) if not i == pt.O]
            assert len(other_components) == 1  # need to be simple oxides
            other = other_components[0]
            # Charge balance: each O contributes -2, split over the cations.
            charge = oxygens * 2 / p[other]
            ion = other.ion[charge]
            schema.append({str(ion): p[other], "O": oxygens})
    else:
        # elemental composition
        parts = components
        for part in parts:
            p = list(pt.formula(part).atoms)[0]
            if p.charge != 0:
                charge = p.charge
            else:
                charge = p.default_charge
            schema.append({p.ion[charge]: 1})
    # `ref` maps each component to its per-formula-unit ion/oxygen counts.
    ref = pd.DataFrame(data=schema)
    ref.columns = ref.columns.map(str)
    ref.index = components
    cation_masses = {c: pt.formula(c).mass for c in ref.columns}
    # Move the oxygen column to the end so cation columns are contiguous.
    oxygen_index = [i for i in ref.columns if "O" in i][0]
    ref = ref.loc[:, [i for i in ref.columns if not i == oxygen_index] + [oxygen_index]]
    moles_ref = ref.copy(deep=True)
    moles_ref.loc[:, :] = (
        ref.values * moles.T.values
    )  # this works for series, not for frame
    moles_O = moles_ref[oxygen_index].sum()
    moles_cations = (
        moles_ref.loc[:, [i for i in moles_ref.columns if not i == oxygen_index]]
        .sum()
        .sum()
    )
    if not oxygen_constrained:  # oxygen unquantified, try to calculate using cations
        scale = ideal_cations / moles_cations
    else:  # oxygen quantified, try to calculate using oxygen
        scale = ideal_oxygens / moles_O
    moles_ref *= scale
    return moles_ref.sum(axis=0)
|
[
"morgan.j.williams@hotmail.com"
] |
morgan.j.williams@hotmail.com
|
4d1151f44cd7a8e3a0921c051d754940e55df38b
|
45f9abc3c43e021413181e9971d549ba38b030a6
|
/term-1/AIND-Recognizer/asl_utils.py
|
3dc52e9e1183953fd79205891978115077132b4c
|
[] |
no_license
|
SteadBytes/ai-nanodegree
|
01d7c707456585fdf39a83f07ac4def90264324d
|
ba260106dacaaba675a41558e96b2a0998685482
|
refs/heads/master
| 2021-09-11T00:02:11.555421
| 2018-04-04T15:39:57
| 2018-04-04T15:39:57
| 112,731,081
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
from asl_data import SinglesData, WordsData
import numpy as np
from IPython.core.display import display, HTML
RAW_FEATURES = ['left-x', 'left-y', 'right-x', 'right-y']
GROUND_FEATURES = ['grnd-rx', 'grnd-ry', 'grnd-lx', 'grnd-ly']
def show_errors(guesses: list, test_set: SinglesData):
    """ Print WER and sentence differences in tabular form

    :param guesses: list of test item answers, ordered
    :param test_set: SinglesData object
    :return:
        nothing returned, prints error report

    WER = (S+I+D)/N but we have no insertions or deletions for isolated words so WER = S/N
    """
    S = 0  # substitution (mis-recognition) count
    N = len(test_set.wordlist)
    num_test_words = len(test_set.wordlist)
    if len(guesses) != num_test_words:
        print("Size of guesses must equal number of test words ({})!".format(
            num_test_words))
    # Count word-level substitutions against the reference word list.
    for word_id in range(num_test_words):
        if guesses[word_id] != test_set.wordlist[word_id]:
            S += 1
    print("\n**** WER = {}".format(float(S) / float(N)))
    print("Total correct: {} out of {}".format(N - S, N))
    print('Video Recognized Correct')
    print('=====================================================================================================')
    # Print recognized vs. reference sentences per video, marking errors with '*'.
    for video_num in test_set.sentences_index:
        correct_sentence = [test_set.wordlist[i]
                            for i in test_set.sentences_index[video_num]]
        recognized_sentence = [guesses[i]
                               for i in test_set.sentences_index[video_num]]
        for i in range(len(recognized_sentence)):
            if recognized_sentence[i] != correct_sentence[i]:
                recognized_sentence[i] = '*' + recognized_sentence[i]
        print('{:5}: {:60} {}'.format(video_num, ' '.join(
            recognized_sentence), ' '.join(correct_sentence)))
def getKey(item):
    """Sort-key helper: return the element at index 1 of *item*."""
    second = item[1]
    return second
def train_all_words(training: WordsData, model_selector):
    """ train all words given a training set and selector

    :param training: WordsData object (training set)
    :param model_selector: class (subclassed from ModelSelector)
    :return: dict of models keyed by word
    """
    sequences = training.get_all_sequences()
    Xlengths = training.get_all_Xlengths()
    # Select one model per vocabulary word.
    return {
        word: model_selector(sequences, Xlengths, word, n_constant=3).select()
        for word in training.words
    }
def combine_sequences(split_index_list, sequences):
    '''
    concatenate sequences referenced in an index list and returns tuple of the new X,lengths
    useful when recombining sequences split using KFold for hmmlearn

    :param split_index_list: a list of indices as created by KFold splitting
    :param sequences: list of feature sequences
    :return: tuple of list, list in format of X,lengths use in hmmlearn
    '''
    selected = [sequences[idx] for idx in split_index_list]
    flattened = []
    for seq in selected:
        flattened.extend(seq)
    seq_lengths = [len(seq) for seq in selected]
    return flattened, seq_lengths
def putHTML(color, msg):
    """Wrap *msg* in a colored <font> tag and return it as an IPython HTML object."""
    source = """<font color={}>{}</font><br/>""".format(color, msg)
    return HTML(source)
def feedback(passed, failmsg='', passmsg='Correct!'):
    """Render green pass-HTML or red fail-HTML depending on *passed*."""
    color, msg = ('green', passmsg) if passed else ('red', failmsg)
    return putHTML(color, msg)
def test_features_tryit(asl):
    """Notebook self-check: verify the ground-feature values for frame (98, 1).

    NOTE(review): uses DataFrame.ix, which was removed in pandas >= 1.0 —
    would need .loc on a modern pandas. Confirm the pinned pandas version.
    """
    print('asl.df sample')
    display(asl.df.head())
    sample = asl.df.ix[98, 1][GROUND_FEATURES].tolist()
    correct = [9, 113, -12, 119]
    failmsg = 'The values returned were not correct. Expected: {} Found: {}'.format(
        correct, sample)
    return feedback(sample == correct, failmsg)
def test_std_tryit(df_std):
    """Notebook self-check: verify per-speaker std-dev values for 'man-1'.

    NOTE(review): uses DataFrame.ix (removed in pandas >= 1.0); comparison is
    tolerance-based via np.allclose.
    """
    print('df_std')
    display(df_std)
    sample = df_std.ix['man-1'][RAW_FEATURES]
    correct = [15.154425, 36.328485, 18.901917, 54.902340]
    failmsg = 'The raw man-1 values returned were not correct.\nExpected: {} for {}'.format(
        correct, RAW_FEATURES)
    return feedback(np.allclose(sample, correct, .001), failmsg)
|
[
"="
] |
=
|
1664e7185a09522f272a97c6c6e2f696cb4d1958
|
34087e6a9bb41d9240de4c1bf91cb14a044126bc
|
/scripts/bandplot
|
3e0f44ec4f5991fe99775500897ff806f0be1c70
|
[] |
no_license
|
materialsvirtuallab/phonopy
|
62117e757f98447de2b247e4b6aa186b0b141aab
|
97888bac864f8d8e5eee799b2eeef232e627f018
|
refs/heads/master
| 2020-12-01T03:09:31.707376
| 2014-09-08T15:42:54
| 2014-09-08T15:42:54
| 21,427,440
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,845
|
#!/usr/bin/env python
# Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
try:
import yaml
except ImportError:
print "You need to install python-yaml."
exit(1)
try:
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from phonopy.units import VaspToTHz
def get_plot_data(data):
    """Extract distances, per-qpoint frequency lists, and band-segment start
    positions from a parsed band.yaml dictionary.

    NOTE: this file is Python 2 (print statements elsewhere); nqpoint/npath
    below relies on integer division.
    """
    segment_positions = []
    frequencies = []
    distances = []
    # Number of q-points per band path.
    npoints = data['nqpoint'] / data['npath']
    for j, v in enumerate(data['phonon']):
        frequencies.append([f['frequency'] for f in v['band']])
        distances.append(v['distance'])
        # The first q-point of each path marks the start of a new segment.
        if j % npoints == 0:
            segment_positions.append(v['distance'])
    return distances, frequencies, segment_positions
# Parse options
from optparse import OptionParser
parser = OptionParser()
parser.set_defaults(factor=1.0,
f_max=None,
f_min=None,
is_gnuplot=False,
is_points=False,
is_vertial_line=False,
output_filename=None,
labels=None,
show_legend=False,
title=None)
parser.add_option("--factor", dest="factor", type="float",
help="Conversion factor to favorite frequency unit")
parser.add_option("--fmax", dest="f_max", type="float",
help="Maximum frequency plotted")
parser.add_option("--fmin", dest="f_min", type="float",
help="Minimum frequency plotted")
parser.add_option("--gnuplot", dest="is_gnuplot", action="store_true",
help="Output in gnuplot data style")
parser.add_option("--legend", dest="show_legend",
action="store_true",
help="Show legend")
parser.add_option("--line", "-l", dest="is_vertial_line",
action="store_true",
help="Vertial line is drawn at between paths")
parser.add_option("-o", "--output", dest="output_filename",
action="store", type="string",
help="Output filename of PDF plot")
parser.add_option("--labels", dest="labels", action="store", type="string",
help="Show labels at band segments")
parser.add_option("--points", dest="is_points",
action="store_true",
help="Draw points")
parser.add_option("-t", "--title", dest="title", action="store",
type="string", help="Title of plot")
(options, args) = parser.parse_args()
if options.output_filename:
import matplotlib
matplotlib.use('Agg')
if not options.is_gnuplot:
import matplotlib.pyplot as plt
if options.labels:
from matplotlib import rc
rc('text', usetex=True)
colors = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'b--', 'g--', 'r--', 'c--', 'm--', 'y--', 'k--']
if options.is_points:
colors = [x + 'o' for x in colors]
count = 0
if len(args) == 0:
filenames = ['band.yaml']
else:
filenames = args
if options.is_gnuplot:
print "# distance frequency (bands are separated by blank lines)"
for i, filename in enumerate(filenames):
string = open(filename).read()
data = yaml.load(string, Loader=Loader)
distances, frequencies, segment_positions = get_plot_data(data)
if options.is_gnuplot:
print "# segments:",
for v in segment_positions:
print "%10.8f" % v,
print "%10.8f" % distances[-1]
elif options.is_vertial_line and len(filenames) == 1:
for v in segment_positions[1:]:
plt.axvline(x=v, linewidth=0.5, color='b')
for j, freqs in enumerate(np.array(frequencies).T):
if options.is_gnuplot:
for d, f in zip(distances, freqs * options.factor):
print d,f
print
else:
if j==0:
plt.plot(distances, freqs * options.factor, colors[i],
label=filename)
else:
plt.plot(distances, freqs * options.factor, colors[i])
if options.is_gnuplot:
print
if not options.is_gnuplot:
plt.ylabel('Frequency')
plt.xlabel('Wave vector')
plt.xlim(distances[0], distances[-1])
if not options.f_max == None:
plt.ylim(ymax = options.f_max)
if not options.f_min == None:
plt.ylim(ymin = options.f_min)
plt.axhline(y=0, linestyle=':', linewidth=0.5, color='b')
if len(filenames) == 1:
xticks = segment_positions + [distances[-1]]
if options.labels:
labels = [x for x in options.labels.split()]
if len(labels)==len(xticks):
plt.xticks(xticks, labels)
else:
print "Numbers of labels and band segments don't match."
sys.exit(1)
else:
plt.xticks(xticks, [''] * len(xticks))
else:
plt.xticks([])
if not options.title == None:
plt.title(options.title)
if options.show_legend:
plt.legend()
if not options.output_filename == None:
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['font.family'] = 'serif'
plt.savefig(options.output_filename)
else:
plt.show()
|
[
"atz.togo@gmail.com"
] |
atz.togo@gmail.com
|
|
08c4ff249af2ede845061c68aa550a412a32f068
|
b3586235dc1e1acbd49fab996f581269a808480b
|
/sistema/producao/migrations/0090_auto_20200419_1946.py
|
05ec1f4442faf22924833cefc45f46cace20c101
|
[] |
no_license
|
gonfersilva/Sistema
|
37ad1cd03dfbb7889fa0b0367c6ebd9044712ae3
|
4c6d9ade22040972efbe892eae0130939d7b5c46
|
refs/heads/master
| 2021-10-23T23:21:51.262723
| 2021-10-13T19:45:49
| 2021-10-13T19:45:49
| 155,545,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 2.2.7 on 2020-04-19 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header). Alters the
    # Reciclado.timestamp_edit column to a plain DateTimeField.

    dependencies = [
        ('producao', '0089_reciclado_timestamp_edit'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reciclado',
            name='timestamp_edit',
            field=models.DateTimeField(),
        ),
    ]
|
[
"goncalo.silva@elastictek.com"
] |
goncalo.silva@elastictek.com
|
3eec27e2dd17dd5a63596c4c056f129c7fd1b671
|
6e4f493329341661d869d9c5a8dd21c1baa6a621
|
/science/Optics/mirrorformula_cnc/mirrorformula_cnc.py
|
7b65c4ba9fd8bfe303a01feb13b81077b5ec018f
|
[] |
no_license
|
yujinee/scimat2
|
8dd03e1ba003715dd910d7e6a086b6f596a7f23b
|
98712c061b9ce5337b3da5b421926de4aaefbe67
|
refs/heads/main
| 2023-08-28T01:57:53.482632
| 2021-10-22T17:30:10
| 2021-10-22T17:30:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
import random
# An object is placed at u cm in front of a concave mirror of focal length f cm. Find at what distance image is formed and its nature.
# An image is formed by a concave mirror of focal length f cm at a distance of v cm in front of it. Find at what distance object is placed in front of mirror.
# An image is formed by a concave mirror of focal length f cm at a distance of v cm behind it. Find at what distance object is placed in front of mirror.
# An object is placed at u cm in front of a concave mirror. Image is formed at a distance of v cm in front of it. Find the focal length of the mirror.
# An object is placed at u cm in front of a concave mirror. Image is formed at a distance of v cm behind it. Find the focal length of the mirror.
# All variants of 1/f = 1/v + 1/u for concave mirror
qns = open('./questions.txt', 'w')
ans = open('./answers.txt','w')
no_of_samples = 3000000
def calculation_1(u, f):
    """Real-image distance: v = u*f / (u - f), rounded to one decimal."""
    return round(u * f / (u - f), 1)


def calculation_2(u, f):
    """Virtual-image distance: v = u*f / (f - u), rounded to one decimal."""
    return round(u * f / (f - u), 1)


def calculation_3(v, f):
    """Object distance from a real image: u = v*f / (v - f), rounded to one decimal."""
    return round(v * f / (v - f), 1)


def calculation_4(v, f):
    """Object distance from a virtual image: u = v*f / (v + f), rounded to one decimal."""
    return round(v * f / (v + f), 1)


def calculation_5(u, v):
    """Focal length from object and real image: f = u*v / (u + v), rounded to one decimal."""
    return round(u * v / (u + v), 1)


def calculation_6(u, v):
    """Focal length from object and virtual image: f = u*v / (v - u), rounded to one decimal."""
    return round(u * v / (v - u), 1)
# Each typeN() builds one random question/answer pair for a variant of the
# concave-mirror equation 1/f = 1/v + 1/u.

def type1() :
    # Real image: object beyond the focal length; solve for image distance.
    f = random.randint(1,800)
    u = random.randint(f+1,f+1200)
    q = "An object is placed at " + str(u) + " cm in front of a concave mirror of focal length " + str(f) + " cm. Find at what distance image is formed and its nature.\n"
    v = str(calculation_1(u,f)) + "cm and real\n"
    return q,v

def type2() :
    # Virtual image: object within the focal length; solve for image distance.
    u = random.randint(1,1000)
    f = random.randint(u+1,u+1000)
    q = "An object is placed at " + str(u) + " cm in front of a concave mirror of focal length " + str(f) + " cm. Find at what distance image is formed and its nature.\n"
    v = str(calculation_2(u,f)) + "cm and virtual\n"
    return q,v

def type3() :
    # Real image given; solve for object distance.
    f = random.randint(1,800)
    v = random.randint(f+1,f+1200)
    q = "An image is formed by a concave mirror of focal length " + str(f) + " cm at a distance of " + str(v) + " cm in front of it. Find at what distance object is placed in front of mirror.\n"
    u = str(calculation_3(v,f)) + "cm\n"
    return q,u

def type4() :
    # Virtual image (behind the mirror) given; solve for object distance.
    f = random.randint(1,1000)
    v = random.randint(1,1000)
    q = "An image is formed by a concave mirror of focal length " + str(f) + " cm at a distance of " + str(v) + " cm behind it. Find at what distance object is placed in front of mirror.\n"
    u = str(calculation_4(v,f)) + "cm\n"
    return q,u

def type5() :
    # Object and real image distances given; solve for focal length.
    u = random.randint(1,1000)
    v = random.randint(1,1000)
    q = "An object is placed at " + str(u) + " cm in front of a concave mirror. Image is formed at a distance of " + str(v) + " cm in front of it. Find the focal length of the mirror.\n"
    f = str(calculation_5(u,v)) + "cm\n"
    return q,f

def type6() :
    # Object and virtual image distances given; solve for focal length.
    u = random.randint(1,1000)
    v = random.randint(u+1,u+1000)
    q = "An object is placed at " + str(u) + " cm in front of a concave mirror. Image is formed at a distance of " + str(v) + " cm behind it. Find the focal length of the mirror.\n"
    f = str(calculation_6(u,v)) + "cm\n"
    return q,f
# Generate no_of_samples random question/answer pairs, picking one of the
# six mirror-formula variants uniformly, and stream them to the two files.
for i in range(no_of_samples):
    types = random.randint(1,6)
    if types == 1:
        ques,answer = type1()
    elif types == 2:
        ques,answer = type2()
    elif types == 3:
        ques,answer = type3()
    elif types == 4:
        ques,answer = type4()
    elif types == 5:
        ques,answer = type5()
    elif types == 6:
        ques,answer = type6()
    qns.write(ques)
    ans.write(answer)
qns.close()
ans.close()
|
[
"snehith.chatakonda@plivo.com"
] |
snehith.chatakonda@plivo.com
|
2c904b54db472eb9fcd58830d3373a19f91eec34
|
252d023b55575f3d25fb9ab8faa92084479244b3
|
/indexpy/http/middleware.py
|
4d296400f4c12251c08bdfc25b053ada44efdb7b
|
[
"Apache-2.0"
] |
permissive
|
sangensong/index.py
|
fef31a222b34961b5869a5d2a5832040029be778
|
4b4cfd0aeef67986f484e3f5f06544b8a2cb7699
|
refs/heads/master
| 2023-03-03T12:24:00.468335
| 2021-02-13T14:46:33
| 2021-02-13T14:46:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,854
|
py
|
from __future__ import annotations
import typing
from indexpy.concurrency import keepasync
if typing.TYPE_CHECKING:
from .request import Request
from .responses import Response, convert_response
MiddlewareMeta = keepasync("process_request", "process_response", "process_exception")
class MiddlewareMixin(metaclass=MiddlewareMeta):  # type: ignore
    """Base class for HTTP middleware: wraps a get_response callable and
    exposes process_request / process_response / process_exception hooks."""

    # Inner middlewares to mount beneath this one; applied in reverse so the
    # first listed ends up outermost.
    mounts: typing.Sequence[typing.Callable] = ()

    def __init__(self, get_response: typing.Callable) -> None:
        self.get_response = self.mount_middleware(get_response)

    def mount_middleware(self, get_response: typing.Callable) -> typing.Callable:
        # Wrap get_response with each mounted middleware.
        for middleware in reversed(self.mounts):
            get_response = middleware(get_response)
        return get_response

    async def __call__(self, request: Request) -> Response:
        # A non-None value from process_request short-circuits the handler.
        response = await self.process_request(request)
        if response is None:
            try:
                response = await self.get_response(request)
            except Exception as exc:
                # process_exception may swallow the error by returning a value.
                response = await self.process_exception(request, exc)
                if response is None:
                    raise exc
        response = convert_response(response)
        response = await self.process_response(request, response)
        return response

    async def process_request(self, request: Request) -> typing.Optional[typing.Any]:
        """
        Must return None, otherwise return the value as the result of this request.
        """

    async def process_response(self, request: Request, response: Response) -> Response:
        return response

    async def process_exception(
        self, request: Request, exception: Exception
    ) -> typing.Optional[typing.Any]:
        """
        If return None, will raise exception.
        Otherwise return the value as the result of this request.
        """
|
[
"me@abersheeran.com"
] |
me@abersheeran.com
|
8a5b454d7307b0ef888c3ccc7dbcc2a78b49ce39
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_136/ch25_2020_10_04_03_54_44_850472.py
|
7687d7e86cce8171d9932c23f11bbf1f7f0be68e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
import math
g= 9.8
def distancia(v, o, g=9.8):
    """Print whether a projectile launched at speed *v* (m/s) and angle *o*
    (degrees) lands in the 99-101 m target window.

    Uses the range formula d = v**2 * sin(2*theta) / g with theta in radians.
    *g* defaults to 9.8 m/s^2 (matches the module-level constant).
    """
    k = math.sin
    # Bug fix: convert degrees -> radians. The original multiplied by
    # 180/pi, which is the inverse (radians -> degrees) conversion.
    h = (math.pi / 180) * o
    d = ((v ** 2) * k(2 * h)) / g
    if d >= 99 and d <= 101:
        print('Acertou!')
    elif d > 101:
        print('Muito longe')
    elif d < 99:
        print('Muito perto')
|
[
"you@example.com"
] |
you@example.com
|
14bc887b39a0ef1763ad3da22c7d9239cd101b13
|
3803b6364290e21061e8c0c97d3e9c0b204c50fc
|
/gae_main.py
|
7533cd5125638887f7837401aa16cc45aa1c6a9d
|
[] |
no_license
|
t0ster/django-gae-buildout-skeleton
|
5a146b94d35ff466b9ee5e981c0ecdfb31eb28b5
|
71c3553b661fbd58937797d352f1a337c1641b0a
|
refs/heads/master
| 2020-05-27T06:28:21.745403
| 2011-06-27T19:24:21
| 2011-06-27T19:24:21
| 1,962,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# Google App Engine entry point: ensure the Django project directory is
# first on sys.path, then hand off to djangoappengine's request handler.
import sys
from os.path import dirname, abspath, join

# Absolute path of the "testapp" Django project next to this file.
PROJECT_DIR = join(abspath(dirname(__file__)), "testapp")

# Normalize sys.path: PROJECT_DIR must appear exactly once, at position 0.
if PROJECT_DIR not in sys.path or sys.path.index(PROJECT_DIR) > 0:
    while PROJECT_DIR in sys.path:
        sys.path.remove(PROJECT_DIR)
    sys.path.insert(0, PROJECT_DIR)

import djangoappengine.main.main

djangoappengine.main.main.main()
|
[
"roman@bravetstudio.com"
] |
roman@bravetstudio.com
|
39b77ac51e19c5f33bf7b51871b0966c27a13121
|
141b42d9d72636c869ff2ce7a2a9f7b9b24f508b
|
/myvenv/Lib/site-packages/phonenumbers/data/region_SC.py
|
b0e98d3e74585ab661b7fde30f9021a9aaefbf4c
|
[
"BSD-3-Clause"
] |
permissive
|
Fa67/saleor-shop
|
105e1147e60396ddab6f006337436dcbf18e8fe1
|
76110349162c54c8bfcae61983bb59ba8fb0f778
|
refs/heads/master
| 2021-06-08T23:51:12.251457
| 2018-07-24T08:14:33
| 2018-07-24T08:14:33
| 168,561,915
| 1
| 0
|
BSD-3-Clause
| 2021-04-18T07:59:12
| 2019-01-31T17:00:39
|
Python
|
UTF-8
|
Python
| false
| false
| 971
|
py
|
"""Auto-generated file, do not edit by hand. SC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SC = PhoneMetadata(id='SC', country_code=248, international_prefix='0(?:[02]|10?)',
general_desc=PhoneNumberDesc(national_number_pattern='[24689]\\d{5,6}', possible_length=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='4[2-46]\\d{5}', example_number='4217123', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='2[5-8]\\d{5}', example_number='2510123', possible_length=(7,)),
toll_free=PhoneNumberDesc(national_number_pattern='8000\\d{3}', example_number='8000000', possible_length=(7,)),
voip=PhoneNumberDesc(national_number_pattern='(?:64\\d|971)\\d{4}', example_number='6412345', possible_length=(7,)),
preferred_international_prefix='00',
number_format=[NumberFormat(pattern='(\\d)(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[246]'])])
|
[
"gruzdevasch@gmail.com"
] |
gruzdevasch@gmail.com
|
410c72545e00ff4b20fa2686e2cb0a81edbfd253
|
0cea2eef085a16792b0722b5ea1ccecf22ebf56a
|
/emu/tests/test_wps_wordcounter.py
|
7957d44ea5c7acf1ec7c8b324c4d11a2df58fc2a
|
[
"Apache-2.0"
] |
permissive
|
Ouranosinc/emu
|
6931657412c2a3412e9548b2ad80a91c7362e79b
|
f3b92f44555b9e85f8c62e8e34a8a59d420a1c67
|
refs/heads/master
| 2021-05-04T02:00:59.517729
| 2016-11-18T17:48:31
| 2016-11-18T17:48:31
| 71,271,528
| 0
| 0
|
Apache-2.0
| 2020-11-11T03:21:22
| 2016-10-18T17:03:47
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
import pytest
from pywps import Service
from emu.tests.common import client_for, assert_response_success
from emu.processes.wps_wordcounter import WordCounter
@pytest.mark.online
def test_wps_wordcount():
    """Execute the WPS 'wordcounter' process on a live URL and assert the
    service responds successfully (marked 'online': requires network)."""
    client = client_for(Service(processes=[WordCounter()]))
    # The process input is a URL whose page text will be word-counted.
    datainputs = "text={0}".format(
        "https://en.wikipedia.org/wiki/Web_Processing_Service")
    resp = client.get(
        service='wps', request='execute', version='1.0.0',
        identifier='wordcounter',
        datainputs=datainputs)
    assert_response_success(resp)
|
[
"ehbrecht@dkrz.de"
] |
ehbrecht@dkrz.de
|
ddc2256caa53e5da02d9ef82936e44811ede8002
|
71e50200ed8fec2bd567b060d52b6ab5c216dc08
|
/app/auth/forms.py
|
3a2250613203d75d661a4c5f7f01523c90374425
|
[
"MIT"
] |
permissive
|
ThiraTheNerd/the_blog
|
5361d7b92be857e4576d3d96e64c176539ff7cba
|
3edd51b2507726b4339f3b59b95133f9e2005700
|
refs/heads/master
| 2023-06-10T15:16:06.112694
| 2021-06-27T07:59:02
| 2021-06-27T07:59:02
| 379,469,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,ValidationError,BooleanField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class RegistrationForm(FlaskForm):
    """Sign-up form: rejects duplicate email addresses and usernames.

    NOTE(review): wtforms' ``Required`` validator is deprecated in newer
    wtforms in favor of DataRequired/InputRequired — confirm pinned version.
    """
    email = StringField('Your Email Address',validators=[Required(),Email()])
    username = StringField('Enter your username',validators = [Required()])
    password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
    password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
    submit = SubmitField('Sign Up')

    def validate_email(self,data_field):
        # Invoked automatically by WTForms for the "email" field.
        if User.query.filter_by(email =data_field.data).first():
            raise ValidationError('There is an account with that email')

    def validate_username(self,data_field):
        # Invoked automatically by WTForms for the "username" field.
        if User.query.filter_by(username = data_field.data).first():
            raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
    """Sign-in form with an optional "remember me" checkbox."""
    email = StringField('Your Email Address',validators=[Required(),Email()])
    password = PasswordField('Password',validators =[Required()])
    remember = BooleanField('Remember me')
    submit = SubmitField('Sign In')
|
[
"thiragithinji@gmail.com"
] |
thiragithinji@gmail.com
|
264ea3af5cb07d50065bcd17d8510014c65e8225
|
65b4522c04c2be071c2d42095956fe950fe1cebe
|
/agu-paper/near_field_co_disp/verticals/plot_co_obs_pred.py
|
1c81afdcebed862eb9e1599a073dafd28848637f
|
[] |
no_license
|
geodesy/viscojapan
|
ac0cd93f7a2134cd2651623b94879dcc21c0c46a
|
03e70265b56eb5994e73bcb6066f0be338e42f27
|
refs/heads/master
| 2021-03-03T18:19:07.779601
| 2015-07-16T03:50:49
| 2015-07-16T03:50:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,052
|
py
|
from os.path import join
import numpy as np
import tempfile
import pGMT
import viscojapan as vj
gmt = pGMT.GMT()
gmt.gmtset('ANNOT_FONT_SIZE_PRIMARY','9',
'LABEL_FONT_SIZE','9',
'BASEMAP_TYPE','PLAIN',
)
gplt = gmt.gplt
lon1 = 139
lon2 = 145
lat1 = 34.7
lat2 = 41
gplt.psbasemap(
R = '{lon1}/{lon2}/{lat1}/{lat2}'.format(lon1=lon1,
lon2=lon2,
lat1 = lat1,
lat2 = lat2
), # region
J = 'B{lon0}/{lat0}/{lat1}/{lat2}/14c'.format(
lon0=(lon1+lon2)/2.,
lat0 = (lat1+lat2)/2.,
lat1 = lat1,
lat2 = lat2), # projection
B = '2', U='20/0/22/Yang', K='', P=''
)
# plot coseismic slip
splt = vj.gmt.GMTSlipPlotter(
gplt = gplt,
slip_file_txt = '../share/co_slip'
)
splt.init(
# original_cpt_file = 'bath_112.cpt',
original_cpt_file = '../Blues_09.cpt',
#if_cpt_reverse = True
)
splt.plot_slip()
splt.plot_scale(
xpos = 12,
ypos = 5)
vj.gmt.plot_plate_boundary(gplt, color=100)
scale = 5
###########################
# onshore
# plot prediction
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_pred',scale)
plt_vec.plot_vectors(arrow_width='.2', head_length='.1', head_width='.1',
pen_width='1.2')
plt_vec.plot_vec_legend(
lon=142.5, lat=40.5,
leg_len = 0.2,
leg_txt = '20 cm pred.',
text_offset_lon = -0.4,
text_offset_lat = -0.15,
if_vertical = True
)
# plot observation
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_obs',scale,'red')
plt_vec.plot_vectors(arrow_width='.2', head_length='.1', head_width='.1',
pen_width='1.2')
plt_vec.plot_vec_legend(
lon=143.2, lat=40.5,
leg_len = .2,
leg_txt = '20 cm obs. ONSHORE',
text_offset_lon = -0.2,
text_offset_lat = -0.15,
if_vertical = True,
)
#######################3
# plot seafloor:
scale = 1.5
# plot prediction
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_pred_seafloor',scale)
plt_vec.plot_empty_vectors()
plt_vec.plot_vec_legend(
lon=143, lat=35.2,
leg_len = 1,
leg_txt = '1 m pred.',
text_offset_lon = -0.2,
text_offset_lat = -0.25,
if_vertical = True
)
# plot observation
plt_vec = vj.gmt.VecFieldPlotter(gmt, 'share/co_ver_obs_seafloor',scale,'red')
plt_vec.plot_empty_vectors()
plt_vec.plot_vec_legend(
lon=143.7, lat=35.2,
leg_len = 1,
leg_txt = '1 m obs. SEAFLOOR',
text_offset_lon = -0.2,
text_offset_lat = -0.25,
if_vertical = True
)
gplt.pscoast(
R = '', J = '',
D = 'h', N = 'a/faint,50,--', A='500',
W = 'faint,100', L='f139.5/39.5/38/50+lkm+jt',
O = '', K='')
vj.gmt.plot_seafloor_stations(gplt, marker_size=0, network='SEAFLOOR_POST',
justification='MB', text_offset_Y=0.03,
fontsize='8')
gplt.finish()
gmt.save('seafloor_co_ver_obs_pred.pdf')
|
[
"zy31415@gmail.com"
] |
zy31415@gmail.com
|
b8543bcd94f5a24fda57f0ec6485022513811113
|
e443674961b04476e96b0db3b7a963966bf72818
|
/score/urls.py
|
d6fc782e280fdac71818a20f18de39f8174172f9
|
[
"BSD-3-Clause"
] |
permissive
|
jbbqqf/okapi
|
14ded14219ba9ed9dc0acaea1c6b97a2b10afa73
|
3db29ef1e15685fae304190bd176f75c4e367d03
|
refs/heads/master
| 2022-11-28T15:08:37.357135
| 2016-01-04T16:01:25
| 2016-01-04T16:01:25
| 283,250,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from score.views import stats, mystats, ScoreView, CurrentScoreView
# DRF router generates the standard list/detail routes for the viewsets.
router = DefaultRouter()
router.register(r'scores', ScoreView)
router.register(r'currentscores', CurrentScoreView)
# Router-generated routes first, then the aggregate stats function views.
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^stats/$', stats),
    url(r'^mystats/$', mystats),
]
|
[
"jbaptiste.braun@gmail.com"
] |
jbaptiste.braun@gmail.com
|
c301f72048c538c25d20741d719ccdcd362b3ffb
|
ad0857eaba945c75e705594a53c40dbdd40467fe
|
/baekjoon/python/pow_of_matrix_10830.py
|
96dbae8eb57691de783032c11a2b4137b5b2df3e
|
[
"MIT"
] |
permissive
|
yskang/AlgorithmPractice
|
c9964d463fbd0d61edce5ba8b45767785b0b5e17
|
3efa96710e97c8740d6fef69e4afe7a23bfca05f
|
refs/heads/master
| 2023-05-25T13:51:11.165687
| 2023-05-19T07:42:56
| 2023-05-19T07:42:56
| 67,045,852
| 0
| 0
| null | 2021-06-20T02:42:27
| 2016-08-31T14:40:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
# Title: 행렬 제곱
# Link: https://www.acmicpc.net/problem/10830
import sys
import copy
sys.setrecursionlimit(10 ** 6)


def read_list_int() -> list:
    """Read one line from stdin and return its space-separated integers.

    PEP 8 (E731) prefers a ``def`` over assigning a lambda to a name;
    behavior is unchanged.
    """
    return list(map(int, sys.stdin.readline().strip().split(' ')))
def multiple_matrix(n: int, a: list, b: list, mod: int = 1000) -> list:
    """Return the n x n matrix product a @ b with entries reduced mod ``mod``.

    Entries of ``b`` are reduced before multiplying, so inputs that are
    already >= mod (the problem allows entries up to 1000) still produce
    correct residues. ``mod`` defaults to 1000 per BOJ 10830, but is now
    a parameter so the helper generalizes to other moduli.
    """
    res = []
    for row in a:
        out_row = []
        for col in range(n):
            s = 0
            for x, v in enumerate(row):
                s = (s + v * (b[x][col] % mod)) % mod
            out_row.append(s)
        res.append(out_row)
    return res
def solution(n: int, b: int, matrix: list) -> str:
    """Compute matrix ** b with entries mod 1000, via binary exponentiation.

    Returns the result as n rows of space-separated entries joined by
    newlines, the output format the judge expects.

    Fix: the original reset ``temp`` to the identity after the loop — a
    dead assignment (``temp`` is never read afterwards) — now removed.
    """
    bits = list('{0:b}'.format(b))
    # acc accumulates the answer; starts as the identity matrix.
    acc = [[1 if x == y else 0 for x in range(n)] for y in range(n)]
    # temp holds matrix ** (2 ** step); deepcopy so the caller's matrix
    # is never mutated by repeated squaring.
    temp = copy.deepcopy(matrix)
    # bits.pop() removes from the right, i.e. least-significant bit first.
    if bits.pop() == '1':
        acc = multiple_matrix(n, acc, matrix)
    while bits:
        temp = multiple_matrix(n, temp, temp)
        if bits.pop() == '1':
            acc = multiple_matrix(n, acc, temp)
    return '\n'.join(' '.join(map(str, row)) for row in acc)
def main():
    """Read n and b, then the n matrix rows, and print matrix ** b."""
    n, b = read_list_int()
    rows = [read_list_int() for _ in range(n)]
    print(solution(n, b, rows))


if __name__ == '__main__':
    main()
|
[
"yongsung.kang@gmail.com"
] |
yongsung.kang@gmail.com
|
777aadcb892990786aef2675249423db680d99b2
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_099/ch34_2019_08_28_17_31_34_673027.py
|
1e0afb12ab5000e405d6a94403271238303134e8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Print the compounded value of the deposit for each of 24 months, then
# the running total of those monthly values.
# Fixes vs. the original: the loop used the interest rate ``i`` itself as
# the counter (``while i<=24`` with ``i=i+1``), corrupting the rate while
# looping; the exponent ``t`` was never incremented; and the format
# strings were malformed ('{0}:.2f' printed the spec literally — the spec
# belongs inside the braces: '{0:.2f}').
dep = float(input('Qual o depósito inicial?'))
i = float(input('Qual a taxa de juros?'))
total = 0
for t in range(1, 25):
    mes = dep * (1 + i) ** t
    total = total + mes
    print('{0:.2f}'.format(mes))
print('{0:.2f}'.format(total))
|
[
"you@example.com"
] |
you@example.com
|
f9ca04d56bc46f75f3cd88a867059f4016baeb1f
|
c60c199410289c1d7ec4aea00833b461e1f08f88
|
/.history/older-than/older/source-example/day3/function/cpall.py
|
4f328903303857e622385428d5a8aeb80f642d18
|
[] |
no_license
|
ver007/pythonjumpstart
|
66fb111e6af197fad3e853b2c2d712a1b57a7d59
|
5b1f52479abd07456e2da494149e491d398f3b7d
|
refs/heads/master
| 2021-01-21T01:34:35.501870
| 2015-05-13T14:10:13
| 2015-05-13T14:10:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
#!/usr/bin/env python
import sys
def copyall(*files):
    """Concatenate all source files into the last argument (Python 2).

    files: one or more source paths followed by the target path.  Each
    source is written into the target preceded by a centred banner with
    its name and followed by a dashed divider, so the target records
    where each section came from.  The target is truncated on open.
    """
    target = files[-1]
    with open(target, 'w') as fw:
        for filename in files[:-1]:
            with open(filename) as fp:
                # Centred filename banner before each section.
                fw.write(fp.name.center(60, '-') + "\n")
                for line in fp:
                    fw.write(line)
                print "%s : file copied " % fp.name
            # Divider and blank line after each section.
            fw.write('-'.center(60, '-')+"\n")
            fw.write("\n")
def usage():
    """Exit with a usage message unless at least one source and a target
    were supplied on the command line (Python 2)."""
    if len(sys.argv) < 3:
        print "Usage : "
        print "%s source [source ....] target" % sys.argv[0]
        exit(1)
# Script entry: validate the argument count, then copy every source file
# named on the command line into the last argument.
usage()
copyall(*sys.argv[1:])
|
[
"ravi@rootcap.in"
] |
ravi@rootcap.in
|
7eabfc4c5ae2a54b39ec1b5679ebc7261404c15a
|
181af10fcf40b824fe92d3b8f72fd15d6d1490c2
|
/Contests/101-200/week 195/1499. Max Value of Equation/Max Value of Equation.py
|
4a8cd0f2c2a2c938b386ace14ced1673cfa96cb1
|
[] |
no_license
|
wangyendt/LeetCode
|
402c59a0b7b7f5b3a672231ea5dad8056ade36af
|
4a3ba15284c45b2d8bf38306c8c8526ae174615c
|
refs/heads/master
| 2023-08-10T06:27:54.995152
| 2023-08-10T02:22:27
| 2023-08-10T02:22:27
| 176,651,399
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Max Value of Equation
@time: 2020/06/28 15:53
"""
import heapq
import sys
class Solution:
    def findMaxValueOfEquation(self, points: "list[list[int]]", k: int) -> int:
        """Return max(yi + yj + |xi - xj|) over pairs with |xi - xj| <= k.

        ``points`` is sorted by x (LeetCode 1499 guarantee).  For j > i the
        objective is (xj + yj) + (yi - xi), so we keep a min-heap of
        (xi - yi, xi) for candidate left points and evict entries whose x
        lies more than k behind the current point.  O(n log n) time.

        Fix: the original annotation ``points: list(list())`` was a call
        expression evaluating to an empty list at def time; replaced with
        a proper (string) type annotation.
        """
        h = []  # min-heap of (x - y, x) for candidate left endpoints
        best = -sys.maxsize
        for px, py in points:
            # Drop candidates outside the window px - xi <= k.
            while h and px - h[0][1] > k:
                heapq.heappop(h)
            if h:
                # h[0][0] is the minimal (xi - yi), i.e. maximal (yi - xi).
                best = max(best, px + py - h[0][0])
            heapq.heappush(h, (px - py, px))
        return best
|
[
"905317742@qq.com"
] |
905317742@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.