blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4447eb60eec73954e51b7efa70ffa9f18869a7a | 24a291e5eb298b7c2b4f1105d789ac488457b59c | /Python_Basics/python06_16_DataTypeEx0_김민교.py | 539d13510fc578ff955549ac1e7e68e6f860fb61 | [] | no_license | gmrdns03/Python-Introductory-Course_Minkyo | da3afff502ed44f178d5b3885fbb1b01249ad1de | ef0d4e16aee3dba6a4a10c422ef68b1465745833 | refs/heads/main | 2023-05-29T16:08:31.814542 | 2021-06-23T13:32:14 | 2021-06-23T13:32:14 | 379,300,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py |
vl01 = 10
vl02 = 20
print(id(vl01))
print(id(vl02))
'''
1530162480 == vl01이라는 변수에 10이 저장되어 있는 주소값
1530162800 == vl02이라는 변수에 20이 저장되어 있는 주소값
id 함수는 변수가 가리키고 있는 객체의 주소 값을 돌려주는 파이썬 내장 함수이다.
즉 여기에서 필자가 만든 변수 vl01이 가리키는 리스트의 주소 값은 1530162480 임을 알 수 있다.
''' | [
"noreply@github.com"
] | gmrdns03.noreply@github.com |
47c2d6a4851473c5dd8779a58f3fbb002659da78 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/inspur/sm/plugins/modules/self_test_info.py | a5b0be963a3356086489952a29788583ec4b1504 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 2,215 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: self_test_info
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Get self test information.
description:
- Get self test information on Inspur server.
options: {}
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: self test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Get self test information"
inspur.sm.self_test_info:
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class Test(object):
    """Runs the 'getselftest' subcommand against an Inspur BMC and
    reports the response through Ansible's module interface."""

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        self.results = {}

    def init_module(self):
        """Create the AnsibleModule object from the stored argument spec."""
        self.module = AnsibleModule(argument_spec=self.spec,
                                    supports_check_mode=True)

    def run_command(self):
        """Query the device and keep the raw response."""
        self.module.params['subcommand'] = 'getselftest'
        self.results = get_connection(self.module)

    def show_result(self):
        """Exit the module, handing the collected results back to Ansible."""
        self.module.exit_json(**self.results)

    def work(self):
        """Run the query, then report the result."""
        self.run_command()
        self.show_result()
def main():
    """Module entry point: assemble the argument spec and run the self test."""
    spec = dict(ism_argument_spec)
    Test(spec).work()
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
50e4e33a9e057ace2c4b3719982e0faa15bf0597 | 863a1cda00ab2eda30a9463d69e471740ae7c515 | /models/SSD_DetNet.py | b458d2bf238bfabb08290b34efd92b1166b4bf1f | [] | no_license | sclzsx/Improved_SSD | ab9571c09c22589da61f00ecd42896ac194b3444 | bd6229a134188ab08115fa4105ec0c96f4824b0f | refs/heads/master | 2023-03-28T08:26:27.583019 | 2021-04-02T13:38:30 | 2021-04-02T13:38:30 | 354,029,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,365 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.models as models
import torch.backends.cudnn as cudnn
import os
import torch.nn.init as init
from models.modules import *
from models.FPN import DetNet_FPN
from models.backbones.DetNet import DetNet
class SSD(nn.Module):
    """Single Shot Multibox Architecture (DetNet-backbone variant).

    The backbone produces a set of feature maps, an FPN-style neck fuses
    them, an RFB block re-weights the finest map, and then each feature
    map branches into:
        1) a conv2d for class confidence scores
        2) a conv2d for localization predictions

    See https://arxiv.org/pdf/1512.02325.pdf for the original SSD.

    Args:
        phase: (string) "test" or "train"
        backbone: feature-extraction network returning the FPN inputs
        neck: feature-pyramid module fusing the backbone outputs
        head: "multibox head" -- (loc conv layers, conf conv layers)
        num_classes: number of object classes (including background)
    """

    def __init__(self, phase, backbone, neck, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        # SSD network
        self.base = backbone
        # RFB block applied to the first (finest) FPN output; replaces the
        # L2Norm scaling used by the original VGG-based SSD.
        #self.L2Norm = L2Norm(512, 20)
        self.Norm = BasicRFB(128,128,stride = 1,scale=1.0)
        self.fpn = neck
        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])

        if phase == 'test':
            # at test time class scores are turned into probabilities
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Applies network layers and ops on input image(s) x.

        Args:
            x: input image or batch of images. Shape: [batch,3,300,300].

        Return:
            train: (loc preds [batch, num_priors, 4],
                    conf scores [batch, num_priors, num_classes])
            test:  (loc preds [batch, num_priors, 4],
                    softmaxed conf scores [batch*num_priors, num_classes])
        """
        loc = list()
        conf = list()

        # backbone -> FPN -> RFB on the finest map
        fpn_sources = self.base(x)
        features = self.fpn(fpn_sources)
        features[0] = self.Norm(features[0])

        # apply multibox head to source layers; permute to NHWC so the
        # per-location predictions are contiguous before flattening
        for (x, l, c) in zip(features, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())

        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if self.phase == "test":
            output = (
                loc.view(loc.size(0), -1, 4),                    # loc preds
                self.softmax(conf.view(-1, self.num_classes)),   # conf preds
            )
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
            )
        return output

    def load_weights(self, base_file):
        """Load a state dict from a .pth/.pkl checkpoint onto the CPU."""
        other, ext = os.path.splitext(base_file)
        # BUGFIX: the original test was `ext == '.pkl' or '.pth'`, which is
        # always truthy ('.pth' is a non-empty string), so the error branch
        # below could never execute.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file,
                                            map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry only .pth and .pkl files supported.')
def build_head(cfg, num_classes):
    """Build the SSD multibox prediction heads.

    One (loc, conf) pair of 1x1 convolutions is created per feature map.
    Every head reads 128-channel FPN features; for each of the ``cfg[i]``
    anchor boxes the loc head predicts 4 box offsets and the conf head
    predicts ``num_classes`` class scores.

    Args:
        cfg: anchor boxes per feature-map location, one entry per map.
            Only the first six entries are used, matching the original
            unrolled code (the network's neck produces six feature maps).
        num_classes: number of object classes (including background).

    Returns:
        (loc_layers, conf_layers): two parallel lists of nn.Conv2d.
    """
    loc_layers = []
    conf_layers = []
    # The original unrolled exactly six copy-pasted layer pairs; this loop
    # is equivalent (cfg[:6]) and removes the duplication.
    for num_anchors in cfg[:6]:
        loc_layers += [nn.Conv2d(128, num_anchors * 4, kernel_size=1, padding=0)]
        conf_layers += [nn.Conv2d(128, num_anchors * num_classes, kernel_size=1, padding=0)]
    return (loc_layers, conf_layers)
# Anchor ("prior") boxes per feature-map cell, keyed by input resolution.
mbox = {
    '300': [6, 6, 6, 6, 6, 6], # number of boxes per feature map location
    '512': [6, 6, 6, 6, 6, 4, 4],
}
def build_net(phase, size=300, num_classes=21):
    """Assemble the DetNet-backbone SSD detector for the given phase/size.

    Prints an error and returns None when *phase* or *size* is unsupported.
    """
    if phase not in ("test", "train"):
        print("Error: Phase not recognized")
        return
    if size not in (300, 512):
        print("Error: Sorry only RFBNet300 and RFBNet512 are supported!")
        return
    return SSD(phase,
               DetNet(num_classes),
               DetNet_FPN([128, 256, 256, 256, 256, 256]),
               build_head(mbox[str(size)], num_classes),
               num_classes)
if __name__ == "__main__":
    # Smoke test: report FLOPs/params, then measure single-image FPS on GPU.
    net = build_net('test', num_classes=5)
    from ptflops import get_model_complexity_info

    img_dim = 300
    flops, params = get_model_complexity_info(net, (img_dim, img_dim), as_strings=True, print_per_layer_stat=True)
    print('Flops: ' + flops)
    print('Params: ' + params)

    net = net.cuda()
    import time
    with torch.no_grad():
        x = torch.randn(1, 3, 300, 300).cuda()
        # time.perf_counter() replaces time.clock(), which was removed in
        # Python 3.8.
        start = time.perf_counter()
        y = net(x)
        print(type(y), 1 / (time.perf_counter() - start))
| [
"1044068981@qq.com"
] | 1044068981@qq.com |
43b21f98599d04e5232fe8734a9a9ac30960cbdc | 671ec3242f1d83846560ccf34ea9a924c6e37354 | /pandastabletooltip/main.py | 4a87276cf93fc0409da350ba10dbec02bdf9b0aa | [
"MIT"
] | permissive | simon-ritchie/pandas-table-tooltip | a4590731d57d73ac954cf221d9a2af9fe3ed6aef | cf85501f0502c4d7c7b67dfdbe592c69f5be59c8 | refs/heads/master | 2020-06-24T15:30:20.024367 | 2019-07-27T03:48:16 | 2019-07-27T03:48:16 | 199,001,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | """A module that handles tooltip display.
"""
import pandas as pd
from IPython.display import HTML, display
def make_table_html_with_tooltip(df, limit=3000):
    """
    Make an HTML table in which every data cell carries a ``title``
    tooltip of the form ``"<index>, <column>"``.

    Parameters
    ----------
    df : DataFrame
        DataFrame to be displayed.
    limit : int, default 3000
        Display limit number.

    Raises
    ------
    ValueError
        If the number of DataFrame rows exceeds the display
        limit number.

    Returns
    -------
    html : HTML
        Result HTML object.
    """
    if len(df) > limit:
        err_msg = 'The number of DataFrame rows exceeds the display limit '\
            '(currently limited {limit_num}). '\
            'Please adjust the `limit` argument.'.format(
                limit_num=limit)
        raise ValueError(err_msg)
    html_str = '<table border="1" class="dataframe">'
    html_str += '\n  <thead>'
    # BUGFIX: the original wrote "text-aligh", which is not a CSS property.
    html_str += '\n    <tr style="text-align: right;">'
    html_str += '\n      <th></th>'
    for column in df.columns:
        html_str += '\n      <th>{column}</th>'.format(column=column)
    html_str += '\n    </tr>'
    html_str += '\n  </thead>'
    for index_val, sr in df.iterrows():
        # NOTE: a new <tbody> is opened per row (multiple tbody elements are
        # valid HTML), preserved from the original implementation.
        html_str += '\n  <tbody>'
        html_str += '\n    <tr>'
        html_str += '\n      <th>{index_val}</th>'.format(
            index_val=index_val)
        # Series.items() replaces Series.iteritems(), which was removed
        # in pandas 2.0.
        for column_val, value in sr.items():
            tooltip = '{index_val}, {column_val}'.format(
                index_val=index_val,
                column_val=column_val)
            html_str += \
                '\n      <td title="{tooltip}">'\
                '{value}</td>'.format(
                    tooltip=tooltip,
                    value=value)
        html_str += '\n    </tr>'
        html_str += '\n  </tbody>'
    html_str += '\n</table>'
    return HTML(html_str)
| [
"antisocial.sid2@gmail.com"
] | antisocial.sid2@gmail.com |
a9aa83125c49314aac7eec6297fd67dfd86331f6 | 4c7914bf0eb52f2fe5dab70fa630a322a9449e05 | /learnOpencv/venv/Scripts/easy_install-3.6-script.py | a1f29dc75bf1e2af2366ebf9c88f94bdff507b63 | [] | no_license | xhongc/pythonCrawl | f334d737326a47782d2533c4db23734729f13099 | a38e59496dd78b6e070ea6882043b1744190103e | refs/heads/master | 2022-12-10T01:22:01.608193 | 2020-01-12T09:43:19 | 2020-01-12T09:43:22 | 93,115,695 | 4 | 5 | null | 2022-11-22T02:36:28 | 2017-06-02T01:47:22 | Python | UTF-8 | Python | false | false | 464 | py | #!C:\Users\xhongc\work\pythonCrawl\learnOpencv\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools wrapper script: normalize argv[0] by
    # stripping a trailing "-script.py(w)" / ".exe" suffix so pkg_resources
    # sees the plain command name, then invoke the console entry point and
    # propagate its return code as the process exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
    )
| [
"408737515@qq.com"
] | 408737515@qq.com |
b42555da9cd6e0002fc0b67b79a21e51526bd952 | f1cdcfe600aa10c871486c2cf5a91f23a00b5e81 | /ch5/dorm.py | 9db7ace7cf0d6f604f3e16dec60cb2c322507726 | [] | no_license | mccarvik/collective_intelligence | 58268c4f5bcf38466951e3ddf96aba1ad05aaa7e | 9bf448eea62fa59e2ec97fdca0cafeb1d4ce5c50 | refs/heads/master | 2021-09-04T22:38:13.581625 | 2018-01-22T20:00:04 | 2018-01-22T20:00:04 | 106,424,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | import random, math, pdb, time
import optimization
# The dorms, each of which has two available spaces
dorms=['Zeus','Athena','Hercules','Bacchus','Pluto']
# People, along with their first and second choices
prefs=[('Toby', ('Bacchus', 'Hercules')),
('Steve', ('Zeus', 'Pluto')),
('Karen', ('Athena', 'Zeus')),
('Sarah', ('Zeus', 'Pluto')),
('Dave', ('Athena', 'Bacchus')),
('Jeff', ('Hercules', 'Pluto')),
('Fred', ('Pluto', 'Athena')),
('Suzie', ('Bacchus', 'Hercules')),
('Laura', ('Bacchus', 'Hercules')),
('James', ('Hercules', 'Athena'))]
# [(0,9),(0,8),(0,7),(0,6),...,(0,0)]
domain=[(0,(len(dorms)*2)-i-1) for i in range(0,len(dorms)*2)]
def printsolution(vec):
    """Print the student -> dorm assignment encoded by *vec*.

    Each element of *vec* indexes into the list of remaining dorm slots;
    the chosen slot is deleted after every assignment, so later indices
    refer to a shrinking list.
    """
    slots = []
    # Create two slots for each dorm
    for i in range(len(dorms)):
        slots += [i, i]
    # Loop over each student's assignment
    for i in range(len(vec)):
        x = int(vec[i])
        # Choose the slot from the remaining ones
        dorm = dorms[slots[x]]
        # Show the student and assigned dorm.  Formatted print() produces
        # the same "name dorm" output as the original Python-2-only
        # `print a, b` statement, but also runs on Python 3.
        print('%s %s' % (prefs[i][0], dorm))
        # Remove this slot so it cannot be assigned twice
        del slots[x]
def dormcost(vec):
    """Return the total cost of the assignment encoded by *vec*.

    A student placed in their first-choice dorm costs 0, second choice
    costs 1, and any other dorm costs 3.
    """
    # BUGFIX: the original began with pdb.set_trace(), which drops into the
    # debugger on every cost evaluation and makes optimization unusable.
    cost = 0
    # Two slots per dorm; derived from `dorms` instead of the original
    # hard-coded [0,0,1,1,2,2,3,3,4,4], consistent with printsolution().
    slots = []
    for i in range(len(dorms)):
        slots += [i, i]
    # Loop over each student
    for i in range(len(vec)):
        x = int(vec[i])
        # The index selects from what is LEFT in slots, which shrinks as
        # assignments are made.
        dorm = dorms[slots[x]]
        pref = prefs[i][1]
        # First choice costs 0, second choice costs 1
        if pref[0] == dorm:
            cost += 0
        elif pref[1] == dorm:
            cost += 1
        else:
            # Not on the list costs 3
            cost += 3
        # Remove the selected slot
        del slots[x]
    return cost
if __name__ == "__main__":
    # Compare random search against the genetic algorithm on the dorm
    # assignment problem, printing the cost of each solution found.
    s = optimization.randomoptimize(domain, dormcost)
    print(dormcost(s))
    s = optimization.geneticoptimize(domain, dormcost)
    print(dormcost(s))
| [
"mccarviks@gmail.com"
] | mccarviks@gmail.com |
d3dea0d179f7790f5f123aabbfaac8f1eac7b1f8 | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /src/CADAssembler/PostProcessing/Calculix_PostProcess.py | 53efc40f6ca44a42b92ac38869812a268b66624d | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 5,229 | py | #title :Calculix_PostProcess.py
#description :This script performs post processing on Calculix output files (.frd).
#author :Di Yao
#date :2012-6-19
#version :1.0.0.0
#usage :python pyscript.py
#notes :
#python_version :2.7
#==============================================================================
import sys
import ComputedMetricsSummary
import math
import AnalysisFunctions
import re
import utility_functions
def ParseCalculixOutputFile(feaName):
    """Parse a CalculiX ``<feaName>.dat`` results file, feeding each
    section to CalculateMetrics().

    A line starting with 'stresses' or 'displacements' begins a new
    section (the header line itself becomes the section's first element);
    a line starting with 'forces' also flushes the current section but
    the forces data that follows is skipped entirely.
    """
    skipKey = False
    sectionData = []
    # `with` guarantees the handle is closed even if parsing raises; the
    # original left the file open on any exception.
    with open(feaName + '.dat', 'r') as f:
        for line in f:
            line = line.strip()
            if line == '':
                continue
            if line.startswith(('stresses', 'displacements', 'forces')):
                # A new section begins: flush the one collected so far.
                # (The original duplicated this flush in three branches.)
                if sectionData:
                    CalculateMetrics(sectionData)
                    sectionData = []
                skipKey = line.startswith('forces')
                if skipKey:
                    # 'forces' sections carry no metrics; drop the header
                    # and everything up to the next section header.
                    continue
            if not skipKey:
                sectionData.append(line)
    # Flush the trailing section, if any.
    if sectionData:
        CalculateMetrics(sectionData)
def CalculateMetrics(sectionData):
    """Compute stress maxima (or displacement magnitudes) for one section
    of the CalculiX .dat file and store them on the matching component in
    ComputedMetricsSummary.gComponentList.

    The first element of *sectionData* is the section header line; the
    remaining elements are whitespace-separated data rows.
    """
    keyLine = sectionData.pop(0)
    if keyLine.startswith('stresses'):
        keys = keyLine.split()
        ELSet_ID = keys[5]  # element-set name embedded in the header line
        maxMises = 0
        maxShear = 0
        maxBearing = 0
        for data in sectionData:
            splittedLine = data.split()
            stressMatrix = splittedLine[2:]  # drop leading ids, keep stress components
            tmpMise, tmpBear, tmpShear = AnalysisFunctions.FindStressMetrics(stressMatrix)
            maxMises = max(maxMises, tmpMise)
            maxShear = max(maxShear, tmpShear)
            maxBearing = max(maxBearing, tmpBear)
        # FactorOfSafety.  `in` replaces dict.has_key(), which no longer
        # exists on Python 3.
        if ELSet_ID in ComputedMetricsSummary.gComponentList:
            tmpComponent = ComputedMetricsSummary.gComponentList[ELSet_ID]
            #factorOfSafety = min(float(tmpComponent.MaterialProperty['Shear'])/maxShear,
            #                     float(tmpComponent.MaterialProperty['Bearing'])/maxBearing,
            #                     float(tmpComponent.MaterialProperty['Mises'])/maxMises)
            # NOTE(review): raises ZeroDivisionError when the section holds
            # no data rows (maxMises stays 0); the caller's try/except
            # currently hides this.
            factorOfSafety = float(tmpComponent.MaterialProperty['Mises'])/maxMises
            if 'Shear' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['Shear']] = maxShear
            if 'Mises' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['Mises']] = maxMises
            if 'Bearing' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['Bearing']] = maxBearing
            if 'FactorOfSafety' in tmpComponent.MetricsInfo:
                tmpComponent.MetricsOutput[tmpComponent.MetricsInfo['FactorOfSafety']] = factorOfSafety
            ComputedMetricsSummary.gComponentList[ELSet_ID] = tmpComponent
    elif keyLine.startswith('displacements'):
        displacementData = dict()
        for data in sectionData:
            splittedLine = data.split()
            displacementData[splittedLine[0]] = AnalysisFunctions.FindDisplacementMagnitude(
                float(splittedLine[1]),
                float(splittedLine[2]),
                float(splittedLine[3]))
        # NOTE(review): displacementData is built but never stored or
        # returned -- presumably an unfinished feature; confirm intent.
if __name__ == '__main__':
    try:
        feaName = None
        paramFile = None
        argList = sys.argv
        argc = len(argList)
        i = 0
        # Scan argv for "-i <name>[.dat]" (FEA results) and "-p <file>"
        # (metrics parameter XML).
        while i < argc:
            if argList[i][:2] == '-i':
                i += 1
                feaName = utility_functions.right_trim(argList[i], '.dat')
            elif argList[i][:2] == '-p':
                i += 1
                paramFile = argList[i]
            i += 1
        if not feaName or not paramFile:
            exit(1)  # both arguments are mandatory
        ComputedMetricsSummary.ParseXMLFile(paramFile)
        ComputedMetricsSummary.PrintComponentList(ComputedMetricsSummary.gComponentList)
        ParseCalculixOutputFile(feaName)
        ComputedMetricsSummary.WriteXMLFile(ComputedMetricsSummary.gComponentList)
    # BUGFIX: ZeroDivisionError is a subclass of Exception, so in the
    # original (where this clause came AFTER the generic handler) it was
    # unreachable; the more specific handler must come first.
    except ZeroDivisionError:
        print("division by zero!")
    except Exception as e:
        # print() calls so the script also runs on Python 3
        print(e)
        print(type(e))           # prints the type of exception
        print(type(e).__name__)  # prints the type's name
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
5e24f92dbeb200f2d413edb16c62470ebe24c5dd | fb5d9f9b4ae3d7059d582ebb390916c2f9528852 | /models/pix2pix_model.py | e60d32a75ef9b52d29453b688af008de8946200f | [] | no_license | tianxiaguixin002/Code-Implementation-of-Super-Resolution-ZOO | 32d4168f4d8d031968b7a601cf68b50730b15b06 | f6ccf309c7653a27173de5184d17bb5933baab14 | refs/heads/master | 2022-11-13T17:09:11.484532 | 2020-07-06T01:51:25 | 2020-07-06T01:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,404 | py | import torch
from .base_model import BaseModel
from . import base_networks
class Pix2PixModel(BaseModel):
    """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
    The model training requires '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
    pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.
        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
            the modified parser.
        For pix2pix, we do not use image buffer.
        The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
        By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
        """
        # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
        if is_train:
            parser.set_defaults(pool_size=0, gan_mode='vanilla')
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
        return parser
    def __init__(self, opt):
        """Initialize the pix2pix class.
        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = base_networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
            self.netD = base_networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # define loss functions
            self.criterionGAN = base_networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            input (dict): include the data itself and its metadata information.
        """
        self.real_A = input['A'].to(self.device)
        self.real_B = input['B'].to(self.device)
        self.A_paths = input['A_paths']
        self.B_paths = input['B_paths']
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG(self.real_A)  # G(A)
    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine loss and calculate gradients (averaged so D learns at half speed)
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()
    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fake the discriminator (no detach here: gradients must reach G)
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
        # combine loss and calculate gradients
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()
    def optimize_parameters(self):
        """Run one training iteration: forward pass, then D step, then G step."""
        self.forward()                   # compute fake images: G(A)
        # update D
        self.set_requires_grad(self.netD, True)  # enable backprop for D
        self.optimizer_D.zero_grad()     # set D's gradients to zero
        self.backward_D()                # calculate gradients for D
        self.optimizer_D.step()          # update D's weights
        # update G
        self.set_requires_grad(self.netD, False)  # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()        # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
| [
"chenyx.cs@gmail.com"
] | chenyx.cs@gmail.com |
8e32439681edbd47329d1474f20b17008dc11dd4 | ff8ec937d9e5bef6d527f91ec4c8a2248063e9f8 | /Flask_Projects/HuntingBallApp/config/config.py | dcbf3f6a8a86d1b4f003e229f2c60d94f9750b65 | [] | no_license | zyxyuanxiao/Python-Framework-Study-Resources | 3c7743946b828dbd4c0a5b530363d36e54319e9c | cff0f9cefa36afa9fb43f0af5478b7428795d718 | refs/heads/master | 2020-09-04T15:00:06.987122 | 2019-08-19T10:07:29 | 2019-08-19T10:07:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,080 | py | # -*- coding: utf-8 -*-
# @Time : 2018/10/20 下午4:02
# @Author : ShaHeTop-Almighty-ares
# @Email : yang6333yyx@126.com
# @File : config.py
# @Software: PyCharm
from datetime import timedelta
import redis
import os
def app_conf():
    """Select the Flask config key from the ``FLASK_ENV`` environment variable.

    Returns 'development' only when FLASK_ENV is explicitly set to
    "development".  Any other value -- including an unset variable, which
    is what happens when the script is launched from an IDE such as
    PyCharm without exporting FLASK_ENV -- falls back to 'production',
    mirroring Flask's own default behaviour.

    (The original docstring, in Chinese, explained the same PyCharm
    caveat: ``os.environ.get('FLASK_ENV')`` is None unless the variable
    is exported or set in the run configuration.)
    """
    if os.environ.get('FLASK_ENV') == 'development':
        return 'development'
    # The original had separate 'production' and unknown/unset branches
    # that returned the same value; they are collapsed here, and the dead
    # initial ``config_key = 'development'`` assignment is removed.
    return 'production'
class BaseConfig:
    """Base configuration class shared by all environments."""
    # SECRET_KEY = os.urandom(24)
    SECRET_KEY = 'ShaHeTop-Almighty-ares'  # session signing key (hard-coded; should come from the environment in production)
    PERMANENT_SESSION_LIFETIME = timedelta(days=30)  # session expiry: 30 days
    DEBUG = True
    # SERVER_NAME = 'example.com'
    RUN_HOST = '0.0.0.0'
    RUN_PORT = 9999
    """Mysql"""
    HOSTNAME = '127.0.0.1'
    PORT = '3306'
    USERNAME = 'root'
    PASSWORD = '123456'
    DATABASE = 'HuntingBallApp'
    # &autocommit=true
    # DB_URI is built HERE, at class-definition time, from the values above.
    DB_URI = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(
        USERNAME,
        PASSWORD,
        HOSTNAME,
        PORT,
        DATABASE)
    SQLALCHEMY_DATABASE_URI = DB_URI
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    """Redis"""
    # host is the redis server; both the redis server and client must be
    # running.  Redis listens on port 6379 by default.
    REDIS_PWD = 123456
    # NOTE(review): the connection pool and client are created when this
    # module is imported, not lazily on first use.
    POOL = redis.ConnectionPool(host='localhost', port=6379, password=REDIS_PWD, decode_responses=True, db=1)
    R = redis.Redis(connection_pool=POOL)

    @staticmethod
    def init_app(app):
        pass
class ProductionConfig(BaseConfig):
    """Production environment."""
    DEBUG = False
    RUN_PORT = 5000
    # NOTE(review): overriding PASSWORD / REDIS_PWD here does NOT change
    # BaseConfig.DB_URI or BaseConfig.POOL -- those were already built at
    # class-definition time from the base-class values ('123456').
    PASSWORD = 'okcokc111111'  # mysql
    REDIS_PWD = 'okc1111'  # redis
class DevelopmentConfig(BaseConfig):
    """Development environment (inherits every BaseConfig default)."""
    pass
# Maps the key returned by app_conf() to its configuration class.
config_obj = {
    'production': ProductionConfig,
    'development': DevelopmentConfig,
    'default': DevelopmentConfig
}
if __name__ == '__main__':
print(config_obj['default'].DB_URI) | [
"yang6333yyx@126.com"
] | yang6333yyx@126.com |
d163d193ab625d2cae6dc04b0724d037904fd11f | 8487cb41afd00c9cc30402fd3f06e7f52650669e | /python/Run2016G-Nano14Dec2018-v1/MET_cff.py | 6daabedbe6cf3b8f2b2d7082e12dc2e3ef26cd34 | [] | no_license | TreeMaker/ecalBadCalibFilterLists | bf7fd35aee6bccc90b214542e163a94c3f3749f3 | 1e43aff994dff79d768b7b9d284aab6cb951e864 | refs/heads/master | 2020-04-24T05:58:37.174381 | 2019-03-13T20:58:15 | 2019-03-13T20:58:15 | 171,749,544 | 0 | 2 | null | 2019-03-13T20:58:16 | 2019-02-20T21:08:38 | C | UTF-8 | Python | false | false | 1,911 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )  # -1 = process every event
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
# PoolSource reading the NANOAOD files appended to readFiles below
# (no secondary input files are listed).
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/259999CE-35DF-FC40-94F3-AF083D3D615B.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/2FFD045A-0E81-494D-A1C5-5D05A7D261F9.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/36B5871A-2642-C645-B678-000A44EEA080.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/3E28CDD2-923E-7B4B-8347-B2A3A77D9295.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/3EAE39E3-9984-3347-B36D-F780D8D9EE42.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/5CB2D2AB-E077-3F44-A856-56382B8D37FE.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/66DE259F-79E4-3E4A-B321-EAE3541F4E63.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/69885CC9-D601-5D4D-94BA-A89F4A523567.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/783BB280-F41E-9245-9789-0E2D4D917A4C.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/8461E89F-62D0-F343-83A2-46966D193DD2.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/A442A143-884E-0148-BB51-9BBCF5FD6BA5.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/AA1B8977-17ED-864B-8F4E-0777D0FAD48C.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/B5830E7A-9B91-3C40-A94B-2E277526DA74.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/DB06E5F5-DF77-DE4D-A57B-61AC72A034CE.root',
'/store/data/Run2016G/MET/NANOAOD/Nano14Dec2018-v1/80000/E635B26B-BBF9-9346-A1D5-C68388711262.root',
] )
| [
"Alexx.Perloff@Colorado.edu"
] | Alexx.Perloff@Colorado.edu |
580a57ae46929c6900e795d79b5db84f2c334313 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/158/48878/submittedfiles/testes.py | 8ce6c6878e8f49b7c1aa558cb328afafad3ddb5b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO  (START HERE BELOW)
# Read the car's factory cost from stdin, add a 28% surcharge and a 45%
# surcharge on top of it, then print the final price with two decimals.
# NOTE(review): the meaning of the 28%/45% rates is not stated in the code.
cf=float(input('custo de fábrica:'))
cc=cf+((28/100)*cf)+((45/100)*cf)
print('o valor final do carro é: %.2f' %cc)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
346d53ab383825b68c08ad5965f9c6b063709893 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_dampers.py | 3dcd5ff2c852940976e53da2f7db1e902d48cf0b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#class header
class _DAMPERS():
def __init__(self,):
self.name = "DAMPERS"
self.definitions = damper
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['damper']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ed089dd7e4ef4bb26dae77576405c261a91ab7f2 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-dms-enterprise/aliyunsdkdms_enterprise/request/v20181101/ListDatabaseUserPermssionsRequest.py | 1f27eb7699a8380d923ac31a72e1792a68f5f12e | [
"Apache-2.0"
] | permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 2,355 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdms_enterprise.endpoint import endpoint_data
class ListDatabaseUserPermssionsRequest(RpcRequest):
	"""Request object for the DMS Enterprise 'ListDatabaseUserPermssions' API (version 2018-11-01)."""
	def __init__(self):
		RpcRequest.__init__(self, 'dms-enterprise', '2018-11-01', 'ListDatabaseUserPermssions','dmsenterprise')
		# endpoint resolution data is only attached when the SDK core supports it
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	# Generated getter/setter pairs: each setter stores its value as a query
	# parameter of the same name; each getter reads it back.
	def get_PermType(self):
		return self.get_query_params().get('PermType')
	def set_PermType(self,PermType):
		self.add_query_param('PermType',PermType)
	def get_DbId(self):
		return self.get_query_params().get('DbId')
	def set_DbId(self,DbId):
		self.add_query_param('DbId',DbId)
	def get_PageSize(self):
		return self.get_query_params().get('PageSize')
	def set_PageSize(self,PageSize):
		self.add_query_param('PageSize',PageSize)
	def get_Logic(self):
		return self.get_query_params().get('Logic')
	def set_Logic(self,Logic):
		self.add_query_param('Logic',Logic)
	def get_Tid(self):
		return self.get_query_params().get('Tid')
	def set_Tid(self,Tid):
		self.add_query_param('Tid',Tid)
	def get_PageNumber(self):
		return self.get_query_params().get('PageNumber')
	def set_PageNumber(self,PageNumber):
		self.add_query_param('PageNumber',PageNumber)
	def get_UserName(self):
		return self.get_query_params().get('UserName')
	def set_UserName(self,UserName):
self.add_query_param('UserName',UserName) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
f6f93bec6a9ed313c15f650bd437670efc3c85ce | 44ba7f2c3e396ab2c58ce42763da5c18f5d0db4b | /ethicml/implementations/svm.py | 85c8de88e2b577e559d2c95422816287de183b90 | [] | no_license | anonymous-iclr-3518/code-for-submission | 99e45110d2377c08433b619afb9c14cf645be5b0 | 3aecb7642d9611ae0a61cd47948931f8f47b6f76 | refs/heads/main | 2023-01-13T18:27:03.728542 | 2020-11-25T15:21:49 | 2020-11-25T15:21:49 | 315,338,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | """Implementation of SVM (actually just a wrapper around sklearn)."""
from pathlib import Path
import numpy as np
from sklearn.svm import SVC, LinearSVC
from .utils import InAlgoArgs
class SvmArgs(InAlgoArgs):
    """Commandline arguments for SVM."""
    # regularization strength, passed to sklearn as C
    c: float
    # kernel name; "linear" selects LinearSVC in main(), anything else goes to SVC
    kernel: str
def main():
    """This function runs the SVM model as a standalone program.

    Reads train/test arrays from .npz files (keys "x"/"y"), fits a
    (Linear)SVC with a fixed random_state for reproducibility, and writes
    the hard predictions to the path given by args.predictions.
    """
    args = SvmArgs().parse_args()
    with open(args.train, "rb") as train_file:
        train = np.load(train_file)
        train_x, train_y = train["x"], train["y"]
    with open(args.test, "rb") as test_file:
        test = np.load(test_file)
        test_x = test["x"]
    # dedicated linear solver for the linear kernel, generic SVC otherwise
    if args.kernel == "linear":
        clf = LinearSVC(C=args.c, dual=False, tol=1e-12, random_state=888)
    else:
        clf = SVC(C=args.c, kernel=args.kernel, gamma="auto", random_state=888)
    clf.fit(train_x, train_y.ravel())
    predictions = clf.predict(test_x)
    np.savez(Path(args.predictions), hard=predictions)
if __name__ == "__main__":
    main()
| [
"anon@ymo.us"
] | anon@ymo.us |
e367812cc0beb8b25b485671395f92b4d26a3224 | 9aaa5eccdb29909c48de9f03732d598fa66920e5 | /binheap.py | 1c50eeecfb93a62782851c8d25b5c828fd75b419 | [
"MIT"
] | permissive | vector8188/AlgorithmAnalysisPython | 76e09126be0654c9eca0a53b6153129bf8beff46 | 026ca8bf846a504c5eae1677680306b0462b49b9 | refs/heads/master | 2021-01-22T13:23:39.617256 | 2018-09-14T13:54:21 | 2018-09-14T13:54:21 | 100,664,673 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,269 | py | class BinHeap:
"""Class for implementing BinHeap."""
    def __init__(self):
        """Bin heap constructor.

        heapList[0] is an unused sentinel so parent/child index arithmetic
        (i // 2, 2 * i) works with 1-based item positions.
        """
        self.heapList = [0]
        self.currentSize = 0
def percUp(self, i):
"""Checks if the newly entered item is greater/lesser than parent."""
while i > 0:
if self.heapList[i] < self.heapList[i//2]:
# if the i//2'th element(parent) is > i(children) then swap.
# which means if the parent > chilren, we need to maintain
# valid heap datastructure where parent are always less than
# chilren.
tmp = self.heapList[i//2]
self.heapList[i//2] = self.heapList[i]
self.heapList[i] = tmp
i = i//2
def insert(self, k):
self.heapList.append(k)
self.currentSize = self.currentSize + 1
self.percUp(self.currentSize)
    def percDown(self, i):
        """Sink the item at index i down until the min-heap property holds.

        The print calls are verbose debugging traces left in by the author.
        """
        print("i --> {}".format(i))
        result = i*2 <= self.currentSize
        print("Testing {} <= {} --> {}".format(i*2, self.currentSize, result))
        while i*2 <= self.currentSize:
            print("heaplist before swap is {}".format(self.heapList))
            mc = self.minChild(i)
            print("mc <-- {}".format(mc))
            print(
                "self.heapList[{}] is compared with self.heapList[{}]".format(
                    i, mc))
            if self.heapList[i] > self.heapList[mc]:
                print(
                    "self.heapList[{}]-->({}) > self.heapList[{}] --> ({})".
                    format(i, self.heapList[i], mc, self.heapList[mc]))
                tmp = self.heapList[i]
                self.heapList[i] = self.heapList[mc]
                self.heapList[mc] = tmp
                print("heaplist after swap is {}".format(self.heapList))
            print("i <-- mc".format(mc))  # NOTE(review): no placeholder -- the mc argument is ignored
            i = mc
            result = i*2 <= self.currentSize
            print("i --> {} \n".format(i))
            print("Testing {} <= {} --> {}".format(
                i*2, self.currentSize, result))
    def minChild(self, i):
        """Return the index (2i or 2i+1) of the smaller child of node i.

        When only the left child exists (2i+1 > currentSize) it is returned
        unconditionally.
        """
        print("Evaluating minimum of two leaf nodes")
        if i*2+1 > self.currentSize:
            print("i*2+1-->({}) > self.currentSize-->({})".format(
                i*2+1, self.currentSize))
            return i*2
        else:
            print(
                "self.heapList[{}] is compared with self.heapList[{}]".format(
                    i*2+1, i*2))
            if self.heapList[i*2+1] > self.heapList[i*2]:
                print(
                    "self.heapList[{}] --> {} is > self.heapList[{}] --> {}".
                    format(
                        i*2+1, self.heapList[i*2+1], i*2, self.heapList[i*2]))
                print(
                    "returning {} as an index of child with lesser value".
                    format(i*2))
                return i*2
            else:
                print(
                    "self.heapList[{}] --> {} is > self.heapList[{}] --> {}".
                    format(
                        i*2, self.heapList[i*2], i*2+1, self.heapList[i*2+1]))
                print(
                    "returning {} as an index of child with lesser value".
                    format(i*2+1))
                return i*2+1
    def delMin(self):
        """Remove and return the smallest item (the root at index 1)."""
        print("hepList before deletion: {}".format(self.heapList))
        # select lowest element in the heap, which is root.
        retval = self.heapList[1]
        # move the last item to the front in place of the root
        # (retval already holds the old root value).
        self.heapList[1] = self.heapList[self.currentSize]
        # reduce the size of currentSize by one.
        self.currentSize = self.currentSize - 1
        # pop the now-duplicated last item off the list
        self.heapList.pop()
        # percolate the moved item down to its proper position
        self.percDown(1)
        print("hepList after deletion: {}".format(self.heapList))
        return retval
    def buildHeap(self, alist):
        """Heapify alist in O(n) by sinking every internal node, deepest first.

        Replaces any existing heap contents; alist itself is not mutated
        (a copy is taken).
        """
        i = len(alist)//2
        self.currentSize = len(alist)
        self.heapList = [0] + alist[:]
        while (i > 0):
            self.percDown(i)
            i = i - 1
# quick manual smoke test: heapify a small list and pop the minimum (2)
bh = BinHeap()
bh.buildHeap([9,5,6,2,3])
bh.delMin()
| [
"vaibhav.rbs@gmail.com"
] | vaibhav.rbs@gmail.com |
584d4619db06c8d1462cb07e7215ad04c548557e | 31681488e69da3c7e00b0eda28e5cb720ef2299c | /liteiclink/serwb/packet.py | e8fc035b1b096ded56e1cea8560fc1819ccb2679 | [
"BSD-2-Clause"
] | permissive | zsipos/liteiclink | 4e9bdf6a819f490461cb33d0837247041203071d | 864cd831f3475dffd1c92d6d4a1b86608680bcf2 | refs/heads/master | 2021-07-08T07:43:10.897604 | 2020-01-28T09:40:17 | 2020-01-28T09:40:17 | 245,119,569 | 0 | 0 | NOASSERTION | 2020-03-05T09:25:16 | 2020-03-05T09:25:15 | null | UTF-8 | Python | false | false | 4,839 | py | # This file is Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
from math import ceil
from migen import *
from migen.genlib.misc import WaitTimer
from litex.gen import *
from litex.soc.interconnect import stream
class HeaderField:
    """Location of one header field: byte index, bit offset inside that byte,
    and bit width."""
    def __init__(self, byte, offset, width):
        self.byte, self.offset, self.width = byte, offset, width
class Header:
    """Packs/unpacks a set of named HeaderField descriptors to/from one flat
    signal of `length` bytes."""
    def __init__(self, fields, length, swap_field_bytes=True):
        self.fields = fields
        self.length = length
        self.swap_field_bytes = swap_field_bytes
    def get_layout(self):
        # (name, width) pairs in deterministic, sorted-by-name order
        layout = []
        for k, v in sorted(self.fields.items()):
            layout.append((k, v.width))
        return layout
    def get_field(self, obj, name, width):
        # "_lsb"/"_msb" suffixes select the low/high half of a
        # double-width attribute on obj
        if "_lsb" in name:
            field = getattr(obj, name.replace("_lsb", ""))[:width]
        elif "_msb" in name:
            field = getattr(obj, name.replace("_msb", ""))[width:2*width]
        else:
            field = getattr(obj, name)
        if len(field) != width:
            raise ValueError("Width mismatch on " + name + " field")
        return field
    def encode(self, obj, signal):
        """Return assignments placing obj's fields into the header signal."""
        r = []
        for k, v in sorted(self.fields.items()):
            start = v.byte*8 + v.offset
            end = start + v.width
            field = self.get_field(obj, k, v.width)
            if self.swap_field_bytes:
                field = reverse_bytes(field)
            r.append(signal[start:end].eq(field))
        return r
    def decode(self, signal, obj):
        """Return assignments extracting obj's fields from the header signal."""
        r = []
        for k, v in sorted(self.fields.items()):
            start = v.byte*8 + v.offset
            end = start + v.width
            field = self.get_field(obj, k, v.width)
            if self.swap_field_bytes:
                r.append(field.eq(reverse_bytes(signal[start:end])))
            else:
                r.append(field.eq(signal[start:end]))
        return r
def phy_description(dw):
    # phy-side stream: a single dw-bit data word per beat
    layout = [("data", dw)]
    return stream.EndpointDescription(layout)
def user_description(dw):
    # user-side stream: 32-bit data plus the packet length
    # NOTE(review): dw is unused here; the layout is hard-coded to 32 bits
    layout = [
        ("data", 32),
        ("length", 32)
    ]
    return stream.EndpointDescription(layout)
class Packetizer(Module):
    """FSM that frames the user stream: emits a 0x5aa55aa5 preamble word and a
    length word, then forwards payload data until sink.last."""
    def __init__(self):
        self.sink = sink = stream.Endpoint(user_description(32))
        self.source = source = stream.Endpoint(phy_description(32))
        # # #
        # Packet description
        #   - preamble : 4 bytes
        #   - length   : 4 bytes
        #   - payload
        fsm = FSM(reset_state="PREAMBLE")
        self.submodules += fsm
        fsm.act("PREAMBLE",
            # wait for a pending packet, then send the magic preamble word
            If(sink.valid,
                source.valid.eq(1),
                source.data.eq(0x5aa55aa5),
                If(source.ready,
                    NextState("LENGTH")
                )
            )
        )
        fsm.act("LENGTH",
            # send the packet length taken from the sink endpoint
            source.valid.eq(1),
            source.data.eq(sink.length),
            If(source.ready,
                NextState("DATA")
            )
        )
        fsm.act("DATA",
            # pass payload words straight through until the last beat
            source.valid.eq(sink.valid),
            source.data.eq(sink.data),
            sink.ready.eq(source.ready),
            If(source.ready & sink.last,
                NextState("PREAMBLE")
            )
        )
class Depacketizer(Module):
    """FSM that recovers packets framed by Packetizer: hunts for the
    0x5aa55aa5 preamble, reads the length word, then forwards payload words,
    resynchronizing to PREAMBLE after `timeout` seconds of inactivity."""
    def __init__(self, clk_freq, timeout=10):
        self.sink = sink = stream.Endpoint(phy_description(32))
        self.source = source = stream.Endpoint(user_description(32))
        # # #
        count = Signal(len(source.length))
        length = Signal(len(source.length))
        # Packet description
        #   - preamble : 4 bytes
        #   - length   : 4 bytes
        #   - payload
        fsm = FSM(reset_state="PREAMBLE")
        self.submodules += fsm
        timer = WaitTimer(clk_freq*timeout)
        self.submodules += timer
        fsm.act("PREAMBLE",
            # discard words until the magic preamble is seen
            sink.ready.eq(1),
            If(sink.valid &
               (sink.data == 0x5aa55aa5),
                NextState("LENGTH")
            )
        )
        fsm.act("LENGTH",
            # latch the byte length and reset the word counter
            sink.ready.eq(1),
            If(sink.valid,
                NextValue(count, 0),
                NextValue(length, sink.data),
                NextState("DATA")
            ),
            timer.wait.eq(1)
        )
        fsm.act("DATA",
            # length[2:] converts the byte count to a 32-bit word count
            source.valid.eq(sink.valid),
            source.last.eq(count == (length[2:] - 1)),
            source.length.eq(length),
            source.data.eq(sink.data),
            sink.ready.eq(source.ready),
            If(timer.done,
                NextState("PREAMBLE")
            ).Elif(source.valid & source.ready,
                NextValue(count, count + 1),
                If(source.last,
                    NextState("PREAMBLE")
                )
            ),
            timer.wait.eq(1)
        )
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
d2d2f55bf58acf2c7b0638ee9c3f974eddcc7f15 | 1f41b828fb652795482cdeaac1a877e2f19c252a | /maya_tools_backup/chRig/python/chModules/jointBasePsd/ui/part1_driverInfo.py | f84898d94a6be94c7c1a4dcd28d3858e67aa209f | [] | no_license | jonntd/mayadev-1 | e315efe582ea433dcf18d7f1e900920f5590b293 | f76aeecb592df766d05a4e10fa2c2496f0310ca4 | refs/heads/master | 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,678 | py | import maya.cmds as cmds
import uifunctions as uifnc
import globalInfo
import math
from functools import partial
class MovedDriverList:
    """Maya UI helper listing angleDriver nodes with their three output
    angles; values past minValue are highlighted and can be pushed into the
    target textScrollList via a popup menu."""
    def __init__(self, width, targetUI, minValue=0.1 ):
        # NOTE(review): the -25 presumably compensates for the scroll bar width
        self._width = width-25
        self._minValue = minValue  # threshold above which an angle counts as "moved"
        self._updateTargetUi = targetUI  # textScrollList receiving selected drivers
    def driverScrollAddPopupCmd(self, *args ):
        """(Re)build the right-click popup menu on the target scroll list."""
        # delete any previous popup; ignore the failure on first use
        try: cmds.deleteUI( self.popupUi, menu=1 )
        except: pass
        self.popupUi = cmds.popupMenu( p=self._updateTargetUi )
        def removeSelCmd( *args ):
            # NOTE(review): defined but never attached to a menu item
            si = cmds.textScrollList( self._updateTargetUi, q=1, si=1 )
            cmds.textScrollList( self._updateTargetUi, e=1, ri=si )
        def removeAllCmd( *args ):
            cmds.textScrollList( self._updateTargetUi, e=1, ra=1 )
            #cmds.deleteUI( self.popupUi, menu=1 )
        cmds.menuItem( l='Remove All', c=removeAllCmd )
    def addConnectDriver(self, str1, *args ):
        """Append a "driver | angleN : value" entry to the target list,
        replacing any existing entry for the same driver name."""
        driverName = str1.split( ':' )[0]
        strList = cmds.textScrollList( self._updateTargetUi, q=1, ai=1 )
        if not strList: strList = []
        for strTarget in strList:
            targetDriverName = strTarget.split( ':' )[0]
            if driverName == targetDriverName:
                cmds.textScrollList( self._updateTargetUi, e=1, ri=strTarget )
        cmds.textScrollList( self._updateTargetUi, e=1, a=str1 )
    def add(self, driverName, angleValues=[] ):
        """Append one row (driver label plus three float fields) to the panel.

        NOTE(review): the mutable default [] is never mutated (it is replaced
        by [0,0,0] when empty), so it is harmless here, but a None default
        would be the safer idiom.
        """
        if not angleValues:
            angleValues = [0,0,0]
        defaultBgc = [ .1, .1, .1 ]
        onBgc = [ .9, .9, .2 ]
        enList = [0,0,0]  # NOTE(review): computed but never used below
        bgcList = [None,None,None]
        for i in range( 3 ):
            # highlight fields whose |angle| reaches the "moved" threshold
            if math.fabs( angleValues[i] ) >= self._minValue:
                bgcList[i] = onBgc
                enList[i] = 1
            else:
                bgcList[i] = defaultBgc
                enList[i] = 0
        widthList = uifnc.setWidthByPerList( [70,15,15,15] , self._width )
        cmds.rowColumnLayout( nc=4, cw=[(1,widthList[0]),(2,widthList[1]),(3,widthList[2]),(4,widthList[3])] )
        cmds.text( l= driverName+' : ', al='right' )
        # each float field gets a popup that pushes "driver | angleN : value"
        cmds.floatField( precision=2, v=angleValues[0], bgc= bgcList[0] )
        cmds.popupMenu(); cmds.menuItem( l='Add Driver', c= partial( self.addConnectDriver, driverName+' | angle0 : %3.2f' %angleValues[0] ) )
        cmds.floatField( precision=2, v=angleValues[1], bgc= bgcList[1] )
        cmds.popupMenu(); cmds.menuItem( l='Add Driver', c= partial( self.addConnectDriver, driverName+' | angle1 : %3.2f' %angleValues[1] ) )
        cmds.floatField( precision=2, v=angleValues[2], bgc= bgcList[2] )
        cmds.popupMenu(); cmds.menuItem( l='Add Driver', c= partial( self.addConnectDriver, driverName+' | angle2 : %3.2f' %angleValues[2] ) )
        self.driverScrollAddPopupCmd()
        cmds.setParent( '..' )
class Cmd:
    """Refresh/update logic for the driver-list UI (mixed into Add below)."""
    def __init__(self, width ):
        # registers this instance globally; `width` is accepted but unused here
        globalInfo.driverInfoInst = self
    def updateCmd( self, *args ):
        """Rescan the scene below the root driver and rebuild the driver rows."""
        rootName = globalInfo.rootDriver
        minValue = 0.1
        movedDriverCheck = cmds.checkBox( self._movedDriverCheck, q=1, v=1 )
        # collect every unique angleDriver node found in the children's history
        children = cmds.listRelatives( rootName, c=1, ad=1, f=1 )
        angleDriverList = []
        for child in children:
            hists = cmds.listHistory( child )
            for hist in hists:
                if cmds.nodeType( hist ) == 'angleDriver':
                    if not hist in angleDriverList:
                        angleDriverList.append( hist )
        # optionally keep only drivers whose output moved past minValue
        showDrivers = []
        for driver in angleDriverList:
            if movedDriverCheck:
                angle1, angle2, angle3 = cmds.getAttr( driver+'.outDriver' )[0]
                if math.fabs( angle1 ) > minValue or math.fabs( angle2 ) > minValue or math.fabs( angle3 ) > minValue:
                    showDrivers.append( driver )
            else:
                showDrivers.append( driver )
        # clear the old rows, then add one row per remaining driver
        childUis = cmds.scrollLayout( self._driverListLay, q=1, ca=1 )
        if childUis:
            for childUi in childUis:
                cmds.deleteUI( childUi )
        cmds.setParent( self._driverListLay )
        for driver in showDrivers:
            values = cmds.getAttr( driver+'.outDriver' )[0]
            self._movedDriverInst.add( driver, values )
        self._movedDrivers = showDrivers
        self.reWriteValueCmd()
    def reWriteValueCmd( self ):
        """Refresh each stored "driver | angleN : value" entry with the
        driver's current angle value, dropping entries at or below 0.1."""
        items = cmds.textScrollList( self._driverScrollList, q=1, ai=1 )
        if not items: items = []
        for item in items:
            driverName, other = item.split( ' | angle' )
            angleIndex, angleValue = other.split( ' : ' )
            angleValue = cmds.getAttr( driverName+'.outDriver%s' % angleIndex )
            reItem = driverName+' | angle'+angleIndex+' : %3.2f' % angleValue
            cmds.textScrollList( self._driverScrollList, e=1, ri=item )
            if angleValue > 0.1:
                cmds.textScrollList( self._driverScrollList, e=1, a=reItem )
class Add( Cmd ):
    """Builds the 'Driver LIST' panel UI and wires it to the Cmd refresh logic."""
    def __init__(self, width ):
        self._emptyWidth = 10  # left/right padding columns
        self._width = width - self._emptyWidth*2 - 4
        self._height = 140
        sepList = [ 65, 50 ]
        self._mainWidthList = uifnc.setWidthByPerList( sepList, self._width )
        sepList = [ 70, 30 ]
        self._optionWidthList = uifnc.setWidthByPerList( sepList, self._mainWidthList[0]-20 )
        Cmd.__init__( self, self._mainWidthList[0] )
        self._rowColumns = []
        self.core()
    def core(self):
        """Lay out the panel: title row, driver-row scroll area with options
        (moved-only checkbox + refresh button), and the selection scroll list."""
        column1 = cmds.rowColumnLayout( nc= 3, cw=[(1,self._emptyWidth),
                                                    (2,self._width),
                                                    (3,self._emptyWidth)])
        uifnc.setSpace()
        cmds.text( l='Driver LIST' )
        uifnc.setSpace()
        cmds.setParent( '..' )
        uifnc.setSpace( 5 )
        column2 = cmds.rowColumnLayout( nc=4, cw=[(1,self._emptyWidth),
                                                   (2,self._mainWidthList[0]),
                                                   (3,self._mainWidthList[1]),
                                                   (4,self._emptyWidth) ] )
        uifnc.setSpace()
        column3 = cmds.rowColumnLayout( nc=1, cw=[(1,self._mainWidthList[0])])
        self._driverListLay = cmds.scrollLayout( h=self._height-30 )
        cmds.setParent( '..' )
        uifnc.setSpace( 5 )
        column4 = cmds.rowColumnLayout( nc= 4, cw=[(1,self._emptyWidth),
                                                   (2,self._optionWidthList[0]),
                                                   (3,self._optionWidthList[1]),
                                                   (4,self._emptyWidth)] )
        uifnc.setSpace()
        self._movedDriverCheck = cmds.checkBox( l='Show Only Moved Drivers', cc= self.updateCmd )
        cmds.button( l='Refresh', c= self.updateCmd )
        uifnc.setSpace()
        cmds.setParent( '..' )
        cmds.setParent( '..' )
        self._driverScrollList = cmds.textScrollList( h= self._height )
        self._movedDriverInst = MovedDriverList( self._mainWidthList[0], self._driverScrollList )
        uifnc.setSpace()
        cmds.setParent( '..' )
self._rowColumns = [ column1, column2, column3, column4 ] | [
"kimsung9k@naver.com"
] | kimsung9k@naver.com |
532ef36c34decb44e73a5e1b81beb7a67c57cc0a | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L83/83-79_MD_NVT_rerun/set_1.py | e52dbdc3dbc2b5c4afd43f06c240d855b04ecbb2 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
# Generate per-lambda TI (thermodynamic integration) production inputs and PBS
# job scripts from templates, substituting each lambda value for 'XXX'.
# NOTE(review): `dir` shadows the built-in of the same name; kept as-is here.
dir = '/mnt/scratch/songlin3/run/ptp1b/L83/MD_NVT_rerun/ti_one-step/83_79/'
filesdir = dir + 'files/'
# template production input and PBS script containing the 'XXX' placeholder
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'
# lambda windows for the one-step TI run
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # each lambda has its own working directory named by its value
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin: copy the template and substitute the lambda value in place
    prodin = workdir + "%6.5f_prod_1.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same substitution for the job script
    pbs = workdir + "%6.5f_1.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs (left disabled)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
29fe4042cd2cbd2f2ca9d31a58cf53afd5ba5298 | 4368c51ce45504e2cc17ea8772eeb94c13e1c34a | /utils/meta_utils.py | 2c8068279495081ff5d67c14a8d980c40f3f982b | [] | no_license | Shuai-Xie/metaASM | 1eddc02846ee3fc05198883277357f9735dbaeb0 | c6a7b8fe3ecbca2bdc874e3b0dad6dd8f8c1c4cd | refs/heads/master | 2021-03-18T17:57:12.952618 | 2020-04-03T14:20:12 | 2020-04-03T14:20:12 | 247,087,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | import torch
import numpy as np
import random
from datasets import CIFAR
from datasets.dataset_utils import get_cls_img_idxs_dict
from datasets.transforms import transform_train
from utils.asm_utils import detect_unlabel_imgs, get_select_fn
"""
sort each cls samples by criterion
"""
@torch.no_grad()
def sort_cls_samples(model, label_dataset, num_classes, criterion='lc'):
    """Rank every labeled sample within its class by an uncertainty criterion.

    criterion is one of 'rs'/'lc'/'ms'/'en' -- presumably random sampling,
    least confidence, margin sampling and entropy; confirm in asm_utils'
    get_select_fn.  Returns {class_idx: np.ndarray of dataset indices sorted
    by the chosen criterion}.
    """
    # image idxs grouped per class
    cls_img_idxs = get_cls_img_idxs_dict(label_dataset.targets, num_classes)
    y_pred_prob = detect_unlabel_imgs(model, label_dataset.data, num_classes, bs=100)  # [N,10] prob vector
    sort_cls_idxs_dict = {}
    assert criterion in ['rs', 'lc', 'ms', 'en'], 'no such criterion'
    select_fn = get_select_fn(criterion)
    for cls_idx, img_idxs in cls_img_idxs.items():
        img_idxs = np.array(img_idxs)
        cls_probs = y_pred_prob[img_idxs]  # [n,10]
        # sorted idxs in list (n_samples = whole class -> full sort)
        _, sort_cls_idxs = select_fn(cls_probs, n_samples=len(cls_probs))  # sort total
        # recover to total label idx
        sort_cls_idxs_dict[cls_idx] = img_idxs[sort_cls_idxs]
    return sort_cls_idxs_dict
def check_sample_targets(cls_idxs_dict, targets):
    """Debug helper: print each class id with the labels of its sampled indices."""
    for cls_id in cls_idxs_dict:
        sampled_labels = [targets[idx] for idx in cls_idxs_dict[cls_id]]
        print('class:', cls_id, sampled_labels)
"""
build meta dataset by different sampling methods
"""
def build_meta_dataset(label_dataset, idx_to_meta):
    """Build a CIFAR meta dataset from the given indices of label_dataset.

    NOTE: random.shuffle mutates idx_to_meta in place.
    """
    random.shuffle(idx_to_meta)  # samples originally arrive grouped by class
    meta_dataset = CIFAR(
        data=np.take(label_dataset.data, idx_to_meta, axis=0),
        targets=np.take(label_dataset.targets, idx_to_meta, axis=0),
        transform=transform_train
    )
    return meta_dataset
# random sample
def random_sample_meta_dataset(label_dataset, num_meta, num_classes):
    """Randomly draw num_meta * num_classes samples (class-agnostic) for the meta set."""
    candidates = list(range(len(label_dataset.targets)))
    random.shuffle(candidates)
    n_pick = int(num_meta * num_classes)
    return build_meta_dataset(label_dataset, candidates[:n_pick])
def random_sample_equal_cls(label_dataset, cls_img_idxs_dict, num_meta):
    """Randomly draw exactly num_meta samples from every class for the meta set."""
    picked = []
    for img_idxs in cls_img_idxs_dict.values():
        picked += random.sample(img_idxs, num_meta)
    return build_meta_dataset(label_dataset, picked)
# random sample in a systematic way, loyal to original data distribution
# cover all hard-level samples
def random_system_sample_meta_dataset(label_dataset, sort_cls_idxs_dict, num_meta, mid=None):  # systematic (equal-interval) sampling
    """Systematically pick num_meta indices per class: divide each class's
    ranked list into num_meta strides and take the element at offset `mid`
    inside every stride, so all hardness levels are covered.

    NOTE(review): an explicit mid == 0 is falsy and falls through to the
    random branch; random.randint(0, step) is inclusive of step, so the
    offset may equal the stride length (the min() below guards indexing).
    """
    idx_to_meta = []
    for cls, img_idxs in sort_cls_idxs_dict.items():  # handles classes with unequal sample counts
        step = len(img_idxs) // num_meta
        mid = mid % step if mid else random.randint(0, step)  # offset of the element taken from each stride
        idx_to_meta.extend([img_idxs[min(i * step + mid, len(img_idxs) - 1)]
                            for i in range(num_meta)])  # equal intervals
    return build_meta_dataset(label_dataset, idx_to_meta)
# sample top hard samples on label_dataset
# 不带随机后,选出的样本固定了...
def sample_top_hard_meta_dataset(label_dataset, sort_cls_idxs_dict, num_meta):
    """Take the num_meta hardest samples per class (per-class index lists are
    sorted hardest-first, so the front slice is the hardest)."""
    picked = [idx
              for ranked_idxs in sort_cls_idxs_dict.values()
              for idx in ranked_idxs[:num_meta]]
    return build_meta_dataset(label_dataset, picked)
# sample top easy samples on label_dataset
def sample_top_easy_meta_dataset(label_dataset, sort_cls_idxs_dict, num_meta):
    """Take the num_meta easiest samples per class (per-class index lists are
    sorted hardest-first, so the tail slice is the easiest).

    NOTE(review): with num_meta == 0 the slice [-num_meta:] degenerates to
    the whole list -- callers presumably pass num_meta > 0.
    """
    picked = [idx
              for ranked_idxs in sort_cls_idxs_dict.values()
              for idx in ranked_idxs[-num_meta:]]
    return build_meta_dataset(label_dataset, picked)
| [
"shuaixie@zju.edu.cn"
] | shuaixie@zju.edu.cn |
9c39907cb189fd01a905f1183f03c509c54c9867 | c89e4099f801cb4e71b732f74ba2237883de0b16 | /spider/concurrent/concur_threads_insts.py | 0dbe75f7f91c002521ecda1898366e1fa47d83e3 | [
"BSD-2-Clause"
] | permissive | JiyangZhang/PSpider | 3abc14792875e306d4a0207f1cd872834c35335c | 2151bbdd028acfa5794acab6c87988dc4bf485d3 | refs/heads/master | 2021-08-19T01:31:22.975247 | 2017-11-24T10:03:10 | 2017-11-24T10:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,792 | py | # _*_ coding: utf-8 _*_
"""
concur_threads_insts.py by xianhu
"""
import time
import logging
from .concur_abase import TPEnum, BaseThread
# ===============================================================================================================================
def work_fetch(self):
    """
    procedure of fetching, auto running, and return False if you need stop thread
    """
    # ----1---- take the next url task: (priority, url, keys, depth, repeat count)
    priority, url, keys, deep, repeat = self._pool.get_a_task(TPEnum.URL_FETCH)
    # ----2---- let the fetcher worker download the page
    fetch_result, content = self._worker.working(priority, url, keys, deep, repeat)
    # ----3---- 1 = success -> queue for parsing; 0 = re-queue with lower priority; else = failure
    if fetch_result == 1:
        self._pool.update_number_dict(TPEnum.URL_FETCH_SUCC, +1)
        self._pool.add_a_task(TPEnum.HTM_PARSE, (priority, url, keys, deep, content))
    elif fetch_result == 0:
        self._pool.add_a_task(TPEnum.URL_FETCH, (priority+1, url, keys, deep, repeat+1))
    else:
        self._pool.update_number_dict(TPEnum.URL_FETCH_FAIL, +1)
    # ----4---- mark the task finished in the pool
    self._pool.finish_a_task(TPEnum.URL_FETCH)
    # ----5---- back-pressure: pause fetching while downstream queues are backed up
    while (self._pool.get_number_dict(TPEnum.HTM_NOT_PARSE) > 500) or (self._pool.get_number_dict(TPEnum.ITEM_NOT_SAVE) > 500):
        logging.debug("%s[%s] sleep 5 seconds because of too many 'HTM_NOT_PARSE' or 'ITEM_NOT_SAVE'...", self.__class__.__name__, self.getName())
        time.sleep(5)
    return False if fetch_result == -2 else True
FetchThread = type("FetchThread", (BaseThread,), dict(working=work_fetch))
# ===============================================================================================================================
def work_parse(self):
    """
    procedure of parsing, auto running, and only return True
    """
    # ----1---- take the next fetched page: (priority, url, keys, depth, content)
    priority, url, keys, deep, content = self._pool.get_a_task(TPEnum.HTM_PARSE)
    # ----2---- let the parser worker extract new urls and items to save
    parse_result, url_list, save_list = self._worker.working(priority, url, keys, deep, content)
    # ----3---- on success, feed new urls back to the fetch queue and items to the save queue
    if parse_result > 0:
        self._pool.update_number_dict(TPEnum.HTM_PARSE_SUCC, +1)
        for _url, _keys, _priority in url_list:
            self._pool.add_a_task(TPEnum.URL_FETCH, (_priority, _url, _keys, deep+1, 0))
        for item in save_list:
            self._pool.add_a_task(TPEnum.ITEM_SAVE, (url, keys, item))
    else:
        self._pool.update_number_dict(TPEnum.HTM_PARSE_FAIL, +1)
    # ----4---- mark the task finished in the pool
    self._pool.finish_a_task(TPEnum.HTM_PARSE)
    return True
ParseThread = type("ParseThread", (BaseThread,), dict(working=work_parse))
# ===============================================================================================================================
def work_save(self):
    """
    procedure of saving, auto running, and only return True
    """
    # ----1---- pull the next (url, keys, item) triple from the save queue
    url, keys, item = self._pool.get_a_task(TPEnum.ITEM_SAVE)
    # ----2---- let the saver worker persist the item
    saved_ok = self._worker.working(url, keys, item)
    # ----3---- bump the matching success/failure counter
    counter = TPEnum.ITEM_SAVE_SUCC if saved_ok else TPEnum.ITEM_SAVE_FAIL
    self._pool.update_number_dict(counter, +1)
    # ----4---- mark the task finished in the pool
    self._pool.finish_a_task(TPEnum.ITEM_SAVE)
    return True
SaveThread = type("SaveThread", (BaseThread,), dict(working=work_save))
# ===============================================================================================================================
def init_monitor_thread(self, name, pool, sleep_time=5):
    """
    constructor of MonitorThread (installed as its __init__ via type() below)
    """
    BaseThread.__init__(self, name, None, pool)
    self._sleep_time = sleep_time          # sleeping time in every loop
    self._init_time = time.time()          # initial time of this spider
    self._last_fetch_num = 0               # fetch number in last time
    self._last_parse_num = 0               # parse number in last time
    self._last_save_num = 0                # save number in last time
    return
def work_monitor(self):
    """
    monitor the pool, auto running, and return False if you need stop thread
    """
    time.sleep(self._sleep_time)
    info = "%s status: running_tasks=%s;" % (self._pool.__class__.__name__, self._pool.get_number_dict(TPEnum.TASKS_RUNNING))
    # fetch statistics: totals plus throughput over the last sleep interval
    cur_not_fetch = self._pool.get_number_dict(TPEnum.URL_NOT_FETCH)
    cur_fetch_succ = self._pool.get_number_dict(TPEnum.URL_FETCH_SUCC)
    cur_fetch_fail = self._pool.get_number_dict(TPEnum.URL_FETCH_FAIL)
    cur_fetch_all = cur_fetch_succ + cur_fetch_fail
    info += " fetch:[NOT=%d, SUCC=%d, FAIL=%d, %d/(%ds)];" % (cur_not_fetch, cur_fetch_succ, cur_fetch_fail, cur_fetch_all-self._last_fetch_num, self._sleep_time)
    self._last_fetch_num = cur_fetch_all
    # parse statistics
    cur_not_parse = self._pool.get_number_dict(TPEnum.HTM_NOT_PARSE)
    cur_parse_succ = self._pool.get_number_dict(TPEnum.HTM_PARSE_SUCC)
    cur_parse_fail = self._pool.get_number_dict(TPEnum.HTM_PARSE_FAIL)
    cur_parse_all = cur_parse_succ + cur_parse_fail
    info += " parse:[NOT=%d, SUCC=%d, FAIL=%d, %d/(%ds)];" % (cur_not_parse, cur_parse_succ, cur_parse_fail, cur_parse_all-self._last_parse_num, self._sleep_time)
    self._last_parse_num = cur_parse_all
    # save statistics
    cur_not_save = self._pool.get_number_dict(TPEnum.ITEM_NOT_SAVE)
    cur_save_succ = self._pool.get_number_dict(TPEnum.ITEM_SAVE_SUCC)
    cur_save_fail = self._pool.get_number_dict(TPEnum.ITEM_SAVE_FAIL)
    cur_save_all = cur_save_succ + cur_save_fail
    info += " save:[NOT=%d, SUCC=%d, FAIL=%d, %d/(%ds)];" % (cur_not_save, cur_save_succ, cur_save_fail, cur_save_all-self._last_save_num, self._sleep_time)
    self._last_save_num = cur_save_all
    info += " total_seconds=%d" % (time.time() - self._init_time)
    logging.warning(info)
    return False if self._pool.get_monitor_stop_flag() else True
MonitorThread = type("MonitorThread", (BaseThread,), dict(__init__=init_monitor_thread, working=work_monitor))
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
f3ad2d30d023ac96ee324cece587c787ec28b6ad | 93652e0f73558ffa24059647324f79ba043ba241 | /topi/tests/python/test_topi_clip.py | 041565433bccd162ef55c48cb1e6cd6f106a8200 | [
"Apache-2.0"
] | permissive | souptc/tvm | 830b1444435b6bda267df305538a783eb687d473 | a8574e7bb814997cb3920a72035071899635b753 | refs/heads/master | 2020-03-25T12:42:20.686770 | 2018-08-06T21:07:38 | 2018-08-06T21:07:38 | 143,789,191 | 1 | 0 | Apache-2.0 | 2018-08-06T22:18:20 | 2018-08-06T22:18:19 | null | UTF-8 | Python | false | false | 1,458 | py | """Test code for clip operator"""
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_clip(N, a_min, a_max, dtype):
    """Build topi.clip over an (N, N) placeholder and check it against
    np.clip on every enabled target device."""
    A = tvm.placeholder((N, N), dtype=dtype, name='A')
    B = topi.clip(A, a_min, a_max)
    s = tvm.create_schedule([B.op])
    # use memoize to pickle the test data for next time use
    @memoize("topi.tests.test_topi_clip")
    def get_ref_data():
        # inputs span twice the clip range so both boundaries are exercised
        a_np = np.random.uniform(a_min*2, a_max*2, size=(N, N)).astype(dtype)
        b_np = np.clip(a_np, a_min, a_max)
        return a_np, b_np
    a_np, b_np = get_ref_data()
    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            s = topi.generic.schedule_injective(B)
        a = tvm.nd.array(a_np, ctx)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
        f = tvm.build(s, [A, B], device, name="clip")
        f(a, b)
        np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
    for device in ['llvm', 'opencl']:
        check_device(device)
def test_clip():
    """Run the clip check at size 1024 for each supported dtype."""
    for dtype in ('float32', 'int16', 'int8'):
        verify_clip(1024, -127, 127, dtype)
if __name__ == "__main__":
test_clip()
| [
"tqchen@users.noreply.github.com"
] | tqchen@users.noreply.github.com |
2d38dcb91332ff3a7c9d232d62866608fb719f06 | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/bwobsolete_helpers/PyGUI/PyGUIBase.py | 834a250a485148a54b8d4bd40344fe93be77ec21 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,952 | py | # 2017.08.29 21:44:03 Střední Evropa (letní čas)
# Embedded file name: scripts/client/bwobsolete_helpers/PyGUI/PyGUIBase.py
import BigWorld, GUI
import weakref
from bwdebug import *
from functools import partial
from Listener import Listenable
class PyGUIBase(object, Listenable):
    """Base script class for PyGUI components (Python 2, decompiled source).

    Wraps a GUI component, manages attaching it to the GUI root (or to a
    parent component), and supplies default no-op handlers for the full set
    of GUI events.  Fixes applied to decompiler artifacts are marked with
    BUGFIX/NOTE comments below.
    """

    def __init__(self, component = None):
        Listenable.__init__(self)
        self.component = component
        self.eventHandler = None
        self._parent = None
        self.isActive = False
        return

    def active(self, state):
        """Attach (state=True) or detach the component and toggle its focus."""
        if state == self.isActive:
            return
        if not self.component:
            return
        self.isActive = state
        if state:
            # No explicit parent means we hang directly off the GUI root.
            if not self._parent:
                GUI.addRoot(self.component)
            else:
                self._parent.addChild(self.component)
            self.component.mouseButtonFocus = True
            self.component.moveFocus = True
            self.component.crossFocus = True
        else:
            if not self._parent:
                GUI.delRoot(self.component)
            else:
                self._parent.delChild(self.component)
            self.component.mouseButtonFocus = False
            self.component.moveFocus = False
            self.component.crossFocus = False
        self.listeners.activated(state)

    def _setparent(self, parent):
        # Re-parent the component, preserving its attachment state.  Parents
        # are held via weakref.proxy to avoid reference cycles with the GUI
        # tree.
        if self.isActive:
            if not self._parent:
                GUI.delRoot(self.component)
            else:
                self._parent.delChild(self.component)
        if parent:
            self._parent = weakref.proxy(parent)
        else:
            self._parent = parent
        if self.isActive:
            if not self._parent:
                GUI.addRoot(self.component)
            else:
                self._parent.addChild(self.component)

    def _getparent(self):
        return self._parent

    parent = property(_getparent, _setparent)

    def getWindow(self):
        """Return the enclosing Window script, or None when not inside one."""
        import Window
        if isinstance(self, Window.Window):
            return self
        elif self.component.parent and self.component.parent.script:
            return self.component.parent.script.getWindow()
        else:
            return

    def toggleActive(self):
        self.active(not self.isActive)

    def setEventHandler(self, eh):
        self.eventHandler = eh

    def doLayout(self, parent):
        # Recursively lay out all child component scripts.
        for name, child in self.component.children:
            child.script.doLayout(self)

    def setToolTipInfo(self, toolTipInfo):
        self.toolTipInfo = toolTipInfo

    def removeToolTipInfo(self):
        # BUGFIX: was `hasattr(self, toolTipInfo)` (unquoted name), which
        # raised NameError whenever this method was called.
        if hasattr(self, 'toolTipInfo'):
            del self.toolTipInfo

    # --- default event handlers; subclasses override the ones they need ---

    def focus(self, state):
        pass

    def mouseButtonFocus(self, state):
        pass

    def handleInputLangChangeEvent(self):
        return False

    def handleKeyEvent(self, event):
        return False

    def handleMouseEvent(self, comp, event):
        return False

    def handleMouseButtonEvent(self, comp, event):
        # Notify the enclosing window (if any) that it was clicked.
        window = self.getWindow()
        if window:
            window.listeners.windowClicked()
        return False

    def handleMouseClickEvent(self, component):
        return False

    def handleMouseEnterEvent(self, comp):
        # Show this component's tooltip, if one was configured.
        if getattr(self, 'toolTipInfo', None):
            import ToolTip
            ToolTip.ToolTipManager.instance.setupToolTip(self.component, self.toolTipInfo)
        return False

    def handleMouseLeaveEvent(self, comp):
        return False

    def handleAxisEvent(self, event):
        return False

    def handleDragStartEvent(self, comp):
        return False

    def handleDragStopEvent(self, comp):
        return False

    def handleDragEnterEvent(self, comp, dragged):
        return False

    def handleDragLeaveEvent(self, comp, dragged):
        return False

    def handleDropEvent(self, comp, dropped):
        return False

    def handleIMEEvent(self, event):
        return False

    def onLoad(self, dataSection):
        """Load the optional toolTipInfo subsection from a data section."""
        if dataSection.has_key('toolTipInfo'):
            import ToolTip
            self.toolTipInfo = ToolTip.ToolTipInfo()
            self.toolTipInfo.onLoad(dataSection._toolTipInfo)

    def onSave(self, dataSection):
        """Save the tooltip configuration, when present, to a data section."""
        if hasattr(self, 'toolTipInfo') and self.toolTipInfo is not None:
            toolTipInfoSection = dataSection.createSection('toolTipInfo')
            self.toolTipInfo.onSave(toolTipInfoSection)
        return

    def onBound(self):
        # Ensure every child component carries a PyGUIBase script, then bind
        # any decorated event-handler methods.
        for name, child in self.component.children:
            if not child.script:
                child.script = PyGUIBase(child)
            # NOTE: restored from the decompiled artifact
            # `raise isinstance(...) or AssertionError`.
            assert isinstance(child.script, PyGUIBase)
        self._bindEvents(self.__class__)

    def _bindEvents(self, cls):
        """Bind methods marked with _PyGUIEventHandler to named sub-components."""
        for name, function in cls.__dict__.iteritems():
            if hasattr(function, '_PyGUIEventHandler'):
                for componentName, eventName, args, kargs in function._PyGUIEventHandler:
                    if not callable(function):
                        raise AssertionError
                    # Walk the dotted component path from this component.
                    component = self.component
                    for name in componentName.split('.'):
                        component = getattr(component, name, None)
                        if component is None:
                            break
                    # BUGFIX: control flow restored from a decompiler artifact
                    # (`x is None and ERROR_MSG(...)` followed by an
                    # unconditional `continue`) that left the two binding
                    # lines below unreachable.
                    if component is None:
                        ERROR_MSG("PyGUIEvent: '%s' has no component named '%s'." % (str(self), componentName))
                        continue
                    function = getattr(self, function.__name__)
                    setattr(component.script, eventName, partial(function, *args, **kargs))
        for base in cls.__bases__:
            self._bindEvents(base)
        return
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\bwobsolete_helpers\PyGUI\PyGUIBase.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:44:03 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
24e24c1bb50cbbd0c3f4af14a06c6dcf353f6fe4 | 425db5a849281d333e68c26a26678e7c8ce11b66 | /LeetCodeSolutions/LeetCode_0252.py | ccf1daf59fe8c44bc1f9575209b20c8851cafb90 | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | from typing import List
class Solution:
    def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
        """Return True iff no two meetings overlap (one person can attend all).

        Sorts `intervals` in place by start time; after sorting, any overlap
        implies an overlap between some adjacent pair, so one linear scan of
        neighbours suffices.

        Fixed: the previous version seeded its running end time with -1,
        which wrongly rejected valid schedules containing start times
        below -1.  Handles the empty schedule (vacuously True).
        """
        intervals.sort()
        for prev, cur in zip(intervals, intervals[1:]):
            if cur[0] < prev[1]:
                # `cur` starts before `prev` ends -> overlap.
                return False
        return True
| [
"lih627@outlook.com"
] | lih627@outlook.com |
213840862cac4a5e0577be766248cd201e560514 | be6b4181de09a50ccbd7caea58dbdbcbf90602be | /numba/servicelib/threadlocal.py | 2ad13112109b26cdbb93c40202dffb8edc1a6bf4 | [
"BSD-2-Clause"
] | permissive | pombreda/numba | 6490c73fcc0ec5d93afac298da2f1068c0b5ce73 | 25326b024881f45650d45bea54fb39a7dad65a7b | refs/heads/master | 2021-01-15T10:37:08.119031 | 2014-11-06T22:32:48 | 2014-11-06T22:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | """
Implements:
- Threadlocal stack
"""
from __future__ import print_function, absolute_import, division
import threading
class TLStack(object):
    """A stack kept in thread-local storage: each thread sees its own stack."""

    def __init__(self):
        self.local = threading.local()

    @property
    def stack(self):
        try:
            # Retrieve this thread's stack.
            return self.local.stack
        except AttributeError:
            # First access from this thread: initialize an empty stack.
            # BUGFIX: the previous version forgot to return the freshly
            # created list, so the first access in a thread yielded None
            # and push() crashed with AttributeError.
            self.local.stack = []
            return self.local.stack

    def push(self, item):
        """Push `item` onto the calling thread's stack."""
        self.stack.append(item)

    def pop(self):
        """Pop and return the top item of the calling thread's stack."""
        return self.stack.pop()

    @property
    def top(self):
        """The top item (raises IndexError when the stack is empty)."""
        return self.stack[-1]

    @property
    def is_empty(self):
        return not self.stack

    def __bool__(self):
        return not self.is_empty

    # Python 2 truth-testing compatibility.
    __nonzero__ = __bool__

    def __len__(self):
        return len(self.stack)

    def clear(self):
        # Re-creating the threading.local() discards the stacks of *all*
        # threads, not just the caller's.
        self.__init__()
| [
"michael.lam.sk@gmail.com"
] | michael.lam.sk@gmail.com |
43866c23e7957b764f0b579688d0275579b2fd44 | ef2e2a40c9e03173ee936f6672a90a794db5b2a0 | /app/search.py | 5dbfba175888cd77005d66737abc91a5e3083ee9 | [] | no_license | crazynayan/flask-tutorial | fc2fbc3bd7e7f30d48dd2abce5ea05ef3168fc6b | 6e51323bf086cadd39a4860388e07b047b8c6fbe | refs/heads/master | 2022-12-13T23:13:08.832155 | 2019-10-30T12:16:54 | 2019-10-30T12:16:54 | 182,255,340 | 0 | 0 | null | 2022-12-08T05:01:38 | 2019-04-19T11:36:10 | Python | UTF-8 | Python | false | false | 969 | py | from flask import current_app
def add_to_index(index, model):
    """Index `model`'s searchable fields in Elasticsearch.

    No-op when the app has no Elasticsearch client configured.  The fields
    to index are taken from the model's `__searchable__` list.
    """
    if not current_app.elasticsearch:
        return
    payload = {field: getattr(model, field) for field in model.__searchable__}
    current_app.elasticsearch.index(index=index, id=model.id, body=payload)
def remove_from_index(index, model):
    """Delete `model`'s document from the Elasticsearch index.

    No-op when the app has no Elasticsearch client configured.
    """
    if not current_app.elasticsearch:
        return
    # BUGFIX: was `current_app.elaseticsearch` (typo), which raised
    # AttributeError whenever Elasticsearch was actually configured.
    current_app.elasticsearch.delete(index=index, id=model.id)
def query_index(index, query, page, per_page):
    """Full-text search `index` for `query`; return (matching ids, total).

    `page`/`per_page` select the result window.  Returns ([], 0) when
    Elasticsearch is not configured, so callers can always unpack the
    result into two values.  (The previous bare `return` yielded None and
    made `ids, total = query_index(...)` raise TypeError.)
    """
    if not current_app.elasticsearch:
        return [], 0
    query_body = {
        'query': {
            'multi_match': {
                'query': query,
                'fields': ['*'],
            },
        },
        'from': (page - 1) * per_page,
        'size': per_page,
    }
    search = current_app.elasticsearch.search(index=index, body=query_body)
    ids = [int(hit['_id']) for hit in search['hits']['hits']]
    return ids, search['hits']['total']['value']
"nayan@crazyideas.co.in"
] | nayan@crazyideas.co.in |
7b98acc53d76f81399ffb120b7e715a6c5608d0a | 00c9701cfc7b1b0bff6a72319d02cd59dc1eca9c | /ros_ws/src/regulation_imugps/src/regulation_from_err_alpha_dist.py | 146f95c8f23cd620b7aa61a5194cd0db3ac032a3 | [] | no_license | EnstaBretagneClubRobo/GuerledanDamScanning | ae80340556898ec6a39395e11975e21272c16c31 | 4309412f0dc883db3e5e4415539f38b5baaa762d | refs/heads/master | 2021-06-14T16:11:16.907465 | 2017-03-03T14:10:51 | 2017-03-03T14:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | #!/usr/bin/env python
"""
This regulateur is just a template and publish a forward command only
"""
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Float32
from math import atan, pi, tan
def update_err_d(msg):
    """ROS callback: store the latest cross-track distance error."""
    global eD
    eD = msg.data
def update_err_cap(msg):
    """ROS callback: store the latest heading error."""
    global ecap
    ecap = msg.data
rospy.init_node('regulation_cap')
cmd_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
imu_sub = rospy.Subscriber('err_d', Float32, update_err_d)
gps_sub = rospy.Subscriber('err_cap', Float32, update_err_cap)
# heading error and distance error (updated by the callbacks above)
ecap, eD = 0, 0
K = -3 / pi  # proportional gain, rad/s
radius = 5  # effective width of the line-following corridor
v = -5.0  # forward speed; TODO: find out why it must be negative
cmd = Twist()
rate = rospy.Rate(20)  # a reasonably high rate is required here
while not rospy.is_shutdown():
    # error = heading(relative to the wall) - desired heading
    err = ecap - atan(eD / radius)
    err = err / 2  # fold [-pi, pi] back into [-pi/2, pi/2]
    cmd.angular.z = K * atan(tan((err)))
    print ecap, atan(eD)
    cmd.linear.x = v
    cmd_pub.publish(cmd)
    rate.sleep()
| [
"ejalaa12@gmail.com"
] | ejalaa12@gmail.com |
ebbdd594ec1e0b143441c4a911fcf81481ed0acf | 4ae1879c21a4193da3df6ae740674ee0655a8beb | /drawDeviation.py | a8b9efe078feb123768f809991f2275a25cac77e | [] | no_license | cynerelee/collision-avoidance | 68bccce1a54009ce7b3bee1bf2adc571b6cde956 | c269b7040b68b91eb5e7e1134feb8363da1091f0 | refs/heads/master | 2023-07-09T02:40:23.760176 | 2023-06-24T03:44:02 | 2023-06-24T03:44:02 | 281,842,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import xlrd  # reads .xlsx workbooks
# Time axis: 0..2 s in 0.01 s steps (201 samples, matching the sheets).
x=np.arange(0, 2.01,0.01)
#print(x)
#print(x.shape)
# Baseline curve: first column of the first sheet of deviation_k1.xlsx.
data1 = xlrd.open_workbook("deviation_k1.xlsx")
table1 = data1.sheet_by_index(0)
line=table1.col_values(0)
base=np.array(line)
base=base.T
resArray=[] # start with an empty list
data = xlrd.open_workbook("deviation_k3.xlsx") # open the workbook
table = data.sheet_by_index(0) # get a worksheet by index; 0 is the first sheet
for i in range(table.nrows): # table.nrows is the total row count
    line=table.row_values(i) # read one row into `line` (a list)
    resArray.append(line) # append the row; resArray becomes a 2-D list
resArray=np.array(resArray) # turn the 2-D list into a numpy array
font1 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size':15,
}
font2 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size':10,
}
color=['#377eb8', '#ff7f00', '#4daf4a','#f781bf', '#a65628', '#984ea3','#999999', '#e41a1c']
alpha=0.6
figure, ax = plt.subplots()
# Configure matplotlib to render CJK text and the minus sign correctly
matplotlib.rcParams['font.sans-serif']=['SimHei'] # use the SimHei font for CJK glyphs
matplotlib.rcParams['axes.unicode_minus']=False # render the minus sign properly
# x-axis label
plt.xlabel("Time(s)",font1)
# y-axis label
plt.ylabel("Deviation(cm)",font1)
plt.axis([0, 2, 0, 6])
plt.tick_params(labelsize=15)
plt.xticks([0,0.2,0.4,0.6,0.8,1,1.2,1.4,1.6,1.8,2])
plt.yticks([0,1,2,3,4,5,6])
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# chart title (left disabled)
#plt.title("频数/频率分布直方图")
#plt.legend(loc = 'upper right',prop=font2)
plt.plot(x, base,alpha=0.6,label='Baseline',color=color[0],linewidth=2)
plt.plot(x, resArray[:,1],alpha=0.6,label='K2=0.1',color=color[1],linewidth=2)
plt.plot(x, resArray[:,2],alpha=0.6,label='K2=1',color=color[2],linewidth=2)
plt.plot(x, resArray[:,3],alpha=0.6,label='K2=5',color=color[3],linewidth=2)
plt.plot(x, resArray[:,4],alpha=0.6,label='K2=10',color=color[4],linewidth=2)
plt.legend(loc = 0,prop=font2)
plt.savefig('./Deviation_k3.png')
plt.show() | [
"l"
] | l |
3a2127cf485882ad716605f78202ae8536f46498 | f453897fccafc2278f959010c6bad52c7802a2fe | /sidebarUpdate.py | ec7becd648760176a127d1c08e6db75bb5c76b28 | [] | no_license | ColinHaley/Python | 4977c325c13652251386e5a5e3f65d55a3f13a07 | bbef9fc8c4e1d31fe5e1142cf7506fc4738295dd | refs/heads/master | 2021-01-25T08:28:17.231365 | 2018-05-09T21:46:32 | 2018-05-09T21:46:32 | 42,951,804 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,866 | py | """
__author__ = 'Colin Haley, aka Kazra'
__purpose__ = 'Update the /r/asov sidebar with online players from asov Vanilla'
Steps:
1. Create upload variables: [string]CSS, [string]Sidebar
2. Get current players
a. If 0:
i. Clear Sidebar Playerheads
        ii. Set to "No Players Online."
        iii. Exit()
b. If >= 1:
i. For each player online:
- If their img exists in /data && newer than GETDATE()-3:
1. Add Strings to CSS and Sidebar variables.
- If not:
1. If older than GETDATE()-7, delete old playerhead icon.
2. wget or python equivalent to ~/srv/_config/data/ their player head icon
3. rename from 32.png to playername.png
4. Upload image
- Update Users table with:
1. UPDATE Users set Timestamp = NOW() WHERE Username = 'playername'
# Other Resources
http://cravatar.us/head/__playername__/32.png
Even unclaimed names return a 'Steve' head, no error handling needed? Dangerzone
https://www.reddit.com/dev/api
#POST_api_upload_sr_img
#POST_api_delete_sr_img
https://github.com/reddit/reddit/wiki/OAuth2
# Mandatory External Libraries
Praw: https://gist.github.com/shrayasr/100005943
Mcstatus: https://github.com/Dinnerbone/mcstatus
"""
# Imports
import praw
import time
import datetime
from mcstatus import MinecraftServer
import urllib
#Static Variables
# NOTE(review): all of these are unfilled placeholders; in particular
# __serverport__ below has no value at all, which is a syntax error and
# prevents this module from importing until a port number is supplied.
__clientID__ = 'redditClientID'
__secretkey__ = 'redditSecretKey'
__subreddit__ = 'subredditName'
__username__ = 'redditUsername'
__password__ = 'redditPassword'
__serveraddress__ = 'minecraftAddress'
__serverport__ = #RCON Port for Minecraft
__datadirectory__ = '/dir/to/location/to/store/playerheads'
# Section to display playerheads within on the sidebar on reddit.
__sidebarheader__ = '[](/STARTONLINEPLAYERS)'
__sidebarfooter__ = '[](/ENDONLINEPLAYERS)'
# Header for CSS to update playerheads online.
__cssheader__ = '/* END ONLINE PLAYER HEADS DO NOT DELETE OR MOVE FROM HEADER POSITION */'
def generate_css(playerName):
    """Return the stylesheet rule that shows `playerName`'s head icon after
    a sidebar link of the form [](/playerName).

    BUGFIX: the original selector was missing the closing double quote
    (a[href="/name]:after), producing an invalid CSS attribute selector.
    """
    return 'a[href="/{0}"]:after {{ content: url(%%{0}%%) }}'.format(playerName)
def generate_sidebar(playerName):
    """Return the sidebar markdown link placeholder "[](/playerName)"."""
    return '[](/{0})'.format(playerName)
def clear_sidebar():
    """Remove the generated playerhead links from the subreddit sidebar.

    Keeps everything up to and including the header marker plus everything
    from the footer marker onward, dropping the generated content between
    them.  Requires the global praw session `r`.

    BUGFIX: the original slicing expression had unbalanced brackets (a
    syntax error); this restores the evident intent.
    NOTE(review): the 'Description' key and the update_settings signature
    come from old praw 3.x -- confirm against the praw version in use.
    """
    sidebar = r.get_settings(__subreddit__)['Description']
    head_end = sidebar.index(__sidebarheader__) + len(__sidebarheader__)
    clearString = sidebar[:head_end] + sidebar[sidebar.index(__sidebarfooter__):]
    r.update_settings(r.get_subreddit(__subreddit__), description=clearString)
def get_css():
    """Return the subreddit's current stylesheet via the global praw session `r`."""
    return r.get_stylesheet(__subreddit__)
def clear_css():
    """Remove the generated playerhead CSS rules from the stylesheet.

    The generated rules are written above the __cssheader__ marker, so
    clearing keeps only the stylesheet from that marker onward.

    BUGFIX: the original line `r.set_stylesheet(__subreddit__, [__header__:])`
    was a syntax error referencing the undefined name `__header__`; this
    restores the apparent intent using the defined __cssheader__ marker.
    NOTE(review): confirm what get_css() returns for this praw version --
    it may be a dict whose 'stylesheet' field holds the CSS text.
    """
    subCSS = get_css()
    r.set_stylesheet(__subreddit__, subCSS[subCSS.index(__cssheader__):])
def upload_css_to_reddit(stringCSS):
    """Push stringCSS (a ''.join() of generate_css() rules) to the subreddit
    stylesheet via the global praw session `r`."""
    r.set_stylesheet(__subreddit__, stringCSS)
def upload_sidebar_to_reddit(stringSidebar):
    """Push stringSidebar (a ''.join() of generate_sidebar() links) to the
    subreddit sidebar.

    NOTE(review): unimplemented in the original -- the def had no body,
    which made the whole module fail to parse.  This docstring doubles as
    the body so the file at least imports; the upload logic still needs
    to be written.
    """
def getCurrentPlayers():
    """Query the Minecraft server; return {'Count': n, 'Players': [names]}.

    NOTE(review): the bare except exits the whole process on any failure
    (server down, bad address, keyboard interrupt) -- confirm that is the
    desired behaviour.
    """
    server = MinecraftServer(__serveraddress__, __serverport__)
    try:
        query = server.query()
        return {'Count': query.players.online, 'Players':query.players.names}
    except:
        exit()
def download_playerhead(playername):
    """Fetch `playername`'s 32px head icon from cravatar into the data dir.

    Grabs the player head from cravatar.eu and saves it as
    <__datadirectory__><playername>.png (Python 2 urllib API).
    """
    downloadPath = 'http://cravatar.eu/head/' + playername + '/32.png'
    # BUGFIX: was `savepath = ...` followed by a use of the undefined name
    # `savePath` (case mismatch), which raised NameError on every call.
    savePath = __datadirectory__ + playername + '.png'
    urllib.urlretrieve(downloadPath, savePath)
def upload_image_to_reddit(playername):
    """Upload `playername`'s saved head icon as a subreddit stylesheet image."""
    __imagedir__ = __datadirectory__ + playername + '.png'
    r.upload_image(__subreddit__, __imagedir__, playername)
def delete_image_from_reddit(playername):
    """Delete `playername`'s head icon from the subreddit stylesheet images."""
    r.delete_image(__subreddit__, name=playername, header=False)
def parse_players_from_sidebar():
    """Parse player names out of the current sidebar markdown.

    Unimplemented stub: the original line had neither a colon nor a body
    (a syntax error).  Made syntactically valid pending implementation.
    """
    raise NotImplementedError
# Get the players online from the server via RCON
# if unsure of the address use MinecraftServer.lookup()
# NOTE(review): this whole section is unfinished pseudo-code and does not
# parse: the `if` branch contains only a comment, the `else` below lacks a
# colon, the mis-indented `except` does not match the `try`, and the praw
# session `r` used at the bottom is never created.
server = MinecraftServer(__serveraddress__, __serverport__)
try:
    query = server.query()
    if query.players.online > 0:
        #do stuff
    else
        #set sidebar to 'No Players Online'
        clear_css()
        clear_sidebar()
    except:
        exit()
#Define the Praw useragent
settings = r.get_settings(__subreddit__)
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
2c827b70acdad62ca67fd30e1824c1fba685a3ec | 492c1e1dabb84ec4efb874b3d9228d31a675a38f | /121.py | bd46672c3c29a00f05e67a8d9d5a65edbc8accd8 | [] | no_license | ksnt/leet | 65f3c36c8a524e1cc1a5d00bb7a840222ecc9dfe | 6680ff978b88d3c44e538b4d5f0e6805ed85f9cf | refs/heads/master | 2022-09-24T10:59:18.740314 | 2022-09-01T19:06:12 | 2022-09-01T19:06:12 | 136,970,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | import sys
class Solution:
    def maxProfit(self, prices):
        """Best profit from one buy followed by one later sell.

        :type prices: List[int]
        :rtype: int
        """
        if not prices:
            return 0
        cheapest = sys.maxsize  # lowest price seen so far
        best = 0                # best profit seen so far
        for price in prices:
            if price < cheapest:
                cheapest = price
            else:
                gain = price - cheapest
                if gain > best:
                    best = gain
        return best
"ksn0215@gmail.com"
] | ksn0215@gmail.com |
33bd9813fab74f630b0d6986aa9f4747cd2d0f9b | 18f2d1458103e1aacaaa14d9ff52654da0154dc8 | /src/layers/cnn.py | a65eefba9fdcd3fd3a51a8020d43ef2cd3f172b7 | [] | no_license | yamad07/IADA | 4fbda5b2e7cdb5efd83f2bd2960bfb8dcfd0d455 | 7dbda1eb336f44e57567f4541e14b31304a4e381 | refs/heads/master | 2020-04-10T23:18:01.809883 | 2019-01-30T16:05:21 | 2019-01-30T16:05:21 | 161,347,800 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import torch.nn as nn
def conv_layer(in_dim, out_dim, kernel_size):
    """Three same-padded conv+ELU stages, then BatchNorm and 2x2 avg-pooling.

    Returns an nn.Sequential mapping (N, in_dim, H, W) -> (N, out_dim, H/2, W/2)
    for odd `kernel_size` (padding keeps the spatial size through the convs).
    """
    pad = (kernel_size - 1) // 2
    stages = [nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, padding=pad),
              nn.ELU(inplace=True)]
    for _ in range(2):
        stages.append(nn.Conv2d(out_dim, out_dim, kernel_size=kernel_size, padding=pad))
        stages.append(nn.ELU(inplace=True))
    stages.append(nn.BatchNorm2d(out_dim))
    stages.append(nn.AvgPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*stages)
| [
"yuhsukeshootsfc@gmail.com"
] | yuhsukeshootsfc@gmail.com |
a82c891c8c753024768d78e5716329e714114205 | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /xlsxwriter/test/comparison/test_chart_drop_lines01.py | 6e303f1bb4c31e9ce82494adcc98a6d81795dacb | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Reference workbook produced by Excel for byte-level comparison.
        self.set_filename('chart_drop_lines01.xlsx')
    def test_create_file(self):
        """Test the creation of an XlsxWriter file with drop down lines."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})
        # Axis ids fixed to match the Excel-generated reference file.
        chart.axis_ids = [48034944, 48036864]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        # The feature under test: drop lines on a line chart.
        chart.set_drop_lines()
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
440d85991f4a5c63c993bfa5575e75c0fe80b2eb | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY001~099/DAY25-BOJ1068-트리/shinjam.py | 7db78b4398a5df90c58f272225b3fb2e50d4feb0 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from collections import defaultdict
# BOJ 1068: count the leaf nodes of a tree after deleting one node (and its
# subtree).  Input: node count N, each node's parent (-1 marks the root),
# and the node to delete.
N = int(input())
input_nodes = map(int, input().split())
del_node = int(input())
nodes = defaultdict(list)  # undirected adjacency list of the remaining tree
stack = []                 # DFS stack, seeded with surviving roots
visited = [0] * N
for idx, val in enumerate(input_nodes):
    # Drop every edge touching the deleted node; its subtree then becomes
    # unreachable and is never visited.
    if del_node in [idx, val]:
        continue
    if val == -1:
        stack.append(idx)
        continue
    nodes[idx].append(val)
    nodes[val].append(idx)
ret = 0
while stack:
    node = stack.pop()
    visited[node] = 1
    leaf = True
    for n in nodes[node]:
        if not visited[n]:
            stack.append(n)
            leaf = False  # has an unvisited child, so not a leaf
    if leaf:
        ret += 1
print(ret)
| [
"noreply@github.com"
] | tachyon83.noreply@github.com |
4a59a6d730c7d42759eeb4c97d075bd0b74a5420 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/vns/rsvdevdomainrefconttodomainref.py | 6f6631bb9d8ebd61481610df7c86e13fd1a69120 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,979 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsVDevDomainRefContToDomainRef(Mo):
    """Relation from a VDev DomainRef container to an AAA DomainRef.

    Machine-generated Cisco ACI cobra model class ("do not modify"); the
    class-level `meta`/`prop` statements below declare the managed-object
    metadata consumed by the cobra runtime.
    """
    meta = NamedSourceRelationMeta("cobra.model.vns.RsVDevDomainRefContToDomainRef", "cobra.model.aaa.DomainRef")
    meta.targetNameProps["name"] = "tnAaaDomainRefName"
    meta.cardinality = SourceRelationMeta.N_TO_ONE
    meta.moClassName = "vnsRsVDevDomainRefContToDomainRef"
    meta.rnFormat = "rsVDevDomainRefContToDomainRef"
    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "Relation from VDev DomainRef Container To AAA Domain Ref"
    meta.writeAccessMask = 0x6000000000000001
    meta.readAccessMask = 0x6000000000000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.parentClasses.add("cobra.model.vns.VDevDomainRefCont")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.reln.To")
    meta.superClasses.add("cobra.model.pol.NToRef")
    meta.rnPrefixes = [
        ('rsVDevDomainRefContToDomainRef', False),
        ]
    # --- property metadata (one PropMeta block per managed-object property) ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "monPolDn", "monPolDn", 18098, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)
    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)
    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "tCl", "tCl", 18094, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1562
    prop.defaultValueStr = "aaaDomainRef"
    prop._addConstant("aaaDomainRef", None, 1562)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
    prop.label = "Target-context"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tContextDn", prop)
    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)
    prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
    prop.label = "Target-rn"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("tRn", prop)
    prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "name"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)
    prop = PropMeta("str", "tnAaaDomainRefName", "tnAaaDomainRefName", 18093, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("tnAaaDomainRefName", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Construct the relation MO under `parentMoOrDn` (no naming props)."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
2599f43c702b477847beae310b71941347de3dfc | d5492bcc77824e29669400622fd89b1349c90caf | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/电商项目/阿里1688_淘宝_天猫_京东_折800_卷皮_拼多多/my_flask_server/tools/时间戳_to_时间.py | bb9790a02ba469733ed07993cf5d5bc247faef0e | [] | no_license | XCodeAny/python | d88980682ba4db839911a5de8c073fa33a63da80 | 35991daf6c7eff4197662b9d07cb9fcdee6a0c02 | refs/heads/master | 2021-08-30T20:00:14.231120 | 2017-12-19T07:55:15 | 2017-12-19T07:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | # coding:utf-8
'''
@author = super_fazai
@File : 时间戳_to_时间.py
@Time : 2017/11/15 17:13
@connect : superonesfazai@gmail.com
'''
import time
def timestamp_to_regulartime(timestamp):
    """Convert a Unix timestamp to a local-time 'YYYY-MM-DD HH:MM:SS' string.

    Accepts anything int() can parse (e.g. an int or a numeric string).
    """
    local_struct = time.localtime(int(timestamp))
    return time.strftime("%Y-%m-%d %H:%M:%S", local_struct)
# Quick demo: convert a sample timestamp and print the formatted result.
timestamp = 1511625600
dt = timestamp_to_regulartime(timestamp)
print(dt)
def is_recent_time(timestamp):
    '''
    Return True when the target timestamp falls inside the accepted
    window: same year as now, current-or-later month, current-or-later
    day of month, and an hour between 8 and 16 inclusive.

    NOTE(review): when the target month is *later* than the current
    month, the day-of-month is still compared against today's day, so
    e.g. the 1st of next month is rejected while today is the 20th --
    confirm this is intended.
    :param timestamp:
    :return:
    '''
    time_1 = int(timestamp)
    time_2 = time.time()  # current timestamp
    time_1 = time.localtime(time_1)
    time_2 = time.localtime(time_2)
    if time_1.tm_year == time_2.tm_year:
        if time_1.tm_mon >= time_2.tm_mon:  # target month is the current month or a later month of this year
            if time_1.tm_mday >= time_2.tm_mday:
                if time_1.tm_hour >= 8 and time_1.tm_hour <= 16:
                    print('合法时间')
                    # diff_days = abs(time_1.tm_mday - time_2.tm_mday)
                    return True
                else:
                    print('该小时在8点到16点以外,此处不处理跳过')
                    return False
            else:
                print('该日时间已过期, 此处跳过')
                return False
        else:  # month already passed
            print('该月份时间已过期,此处跳过')
            return False
    else:
        print('非本年度的限时秒杀时间,此处跳过')
        return False
# while True:
# timestamp = input('请输入要判断的时间戳: ')
# print(is_recent_time(timestamp)) | [
"superonesfazai@gmail.com"
] | superonesfazai@gmail.com |
44bf8f5d04ab2ef20b3544249cd1b6392eb19290 | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /w3schools/python/reference/builtInFunctions.py | b9e411f63673bbb33d19faf1d68a200cdb99c7a9 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 3,948 | py | # Built in Functions
# abs()-Returns the absolute value of a number
print(abs(-7.52))
# For a complex number, abs() returns its magnitude: sqrt(3**2 + 5**2)
print(abs(3+5j))
# all()-Returns True if all items in an iterable object are true
mylist = [True, True, True]
print(all(mylist)) # True
print(all([1, 1, 1])) # True
print(all([0, 1, 1])) # False
print(all([])) # True (vacuously true for an empty iterable)
print(all((0, True, False))) # False
# any()-Returns True if any item in an iterable object is true
"""ascii()-Returns a readable version of an object.
Replaces none-ascii characters with escape character"""
# bin()-Returns the binary version of a number
# bool()-Returns the boolean value of the specified object
# bytearray()-Returns an array of bytes
# bytes()-Returns a bytes object
# callable()-Returns True if the specified object is callable, otherwise False
# chr()-Returns a character from the specified Unicode code.
# classmethod()-Converts a method into a class method
# compile()-Returns the specified source as an object, ready to be executed
# complex()-Returns a complex number
"""
delattr()-Deletes the specified attribute
(property or method) from the specified object
"""
# dict()-Returns a dictionary (Array)
# dir()-Returns a list of the specified object's properties and methods
"""
divmod()-Returns the quotient and the remainder
when argument1 is divided by argument2
"""
"""
enumerate()-Takes a collection (e.g. a tuple)
and returns it as an enumerate object
"""
# eval()-Evaluates and executes an expression
# exec()-Executes the specified code (or object)
# filter()-Use a filter function to exclude items in an iterable object
# float()-Returns a floating point number
# format()-Formats a specified value
# frozenset()-Returns a frozenset object
# getattr()-Returns the value of the specified attribute (property or method)
# globals()-Returns the current global symbol table as a dictionary
"""hasattr()-Returns True if the specified object
has the specified attribute (property/method)"""
# hash()-Returns the hash value of a specified object
# help()-Executes the built-in help system
# hex()-Converts a number into a hexadecimal value
# id()-Returns the id of an object
# input()-Allowing user input
# int()-Returns an integer number
"""isinstance()-Returns True if a specified object
is an instance of a specified object"""
"""issubclass()-Returns True if a specified class is
a subclass of a specified object"""
# iter()-Returns an iterator object
# len()-Returns the length of an object
# list()-Returns a list
# locals()-Returns an updated dictionary of the current local symbol table
"""map()-Returns the specified iterator with
the specified function applied to each item"""
# max()-Returns the largest item in an iterable
# memoryview()-Returns a memory view object
# min()-Returns the smallest item in an iterable
# next()-Returns the next item in an iterable
# object()-Returns a new object
# oct()-Converts a number into an octal
# open()-Opens a file and returns a file object
# ord()-Convert an integer representing the Unicode of the specified character
# pow()-Returns the value of x to the power of y
# print()-Prints to the standard output device
# property()-Gets, sets, deletes a property
"""range()-Returns a sequence of numbers,
starting from 0 and increments by 1 (by default)"""
# repr()-Returns a readable version of an object
# reversed()-Returns a reversed iterator
# round()-Rounds a number
# set()-Returns a new set object
# setattr()-Sets an attribute (property/method) of an object
# slice()-Returns a slice object
# sorted()-Returns a sorted list
# staticmethod()-Converts a method into a static method
# str()-Returns a string object
# sum()-Sums the items of an iterator
# super()-Returns an object that represents the parent class
# tuple()-Returns a tuple
# type()-Returns the type of an object
# vars()-Returns the __dict__ property of an object
# zip()-Returns an iterator, from two or more iterators
| [
"sagarnikam123@gmail.com"
] | sagarnikam123@gmail.com |
05bf10e915b53d57bb3f0174801892d61daffed8 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/search/azure-search-documents/azure/search/documents/_internal/_generated/aio/__init__.py | fa69578ea7f244621643bd7e1b4c113301d9ff0d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 552 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._search_index_client import SearchIndexClient
__all__ = ['SearchIndexClient']
| [
"noreply@github.com"
] | yunhaoling.noreply@github.com |
ae18e15d31cb04495f56ec8136afcdb8cbf22861 | 6ecf8227cc63ea5c8f05fdd6a7d28b3167119367 | /blueking_forum/wsgi.py | 9b85fd8c45ff19aed7455d4ee3ba00e35d2a3b0a | [] | no_license | doraemonext/blueking_forum | 5ad0f46780e785a5af4db6f171654e351f509aa1 | f5737dcdeaef15c37b37a0988aa1be98f6283834 | refs/heads/master | 2020-12-28T21:29:19.982785 | 2015-11-04T04:15:20 | 2015-11-04T04:15:20 | 44,859,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for blueking_forum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Make sure Django knows which settings module to use before the app is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blueking_forum.settings")

# Module-level WSGI callable that WSGI servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"doraemonext@gmail.com"
] | doraemonext@gmail.com |
9d8eef47748cb50afa81f15fa27c8d75bfaca146 | 08351ac650385e2ee0f4fc08ab8ef0978bc5bf3c | /Module2_HTTP/Request_response/Request.py | 981163757b7ae56b101453c505885d2f3f2dcdcd | [] | no_license | tertiarycourses/PythonNetworkingTraining | d3c02488e91d318874558130a89fb112a2c95d55 | 9c5f223a4b83d21a791ac0d322306c3a78c4122f | refs/heads/master | 2019-07-13T07:59:49.241235 | 2017-05-11T14:48:19 | 2017-05-11T14:48:19 | 83,748,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | #Requests with urllib
# from urllib.request import urlopen
# from urllib.request import Request
# response = urlopen('http://www.debian.org')
# print(response)
# print(response.readline())
# ##response object
# print(response.url)
# print(response.status)
# print(response.headers['content-type'])
#response = urlopen('http://www.debian.org')
#print(response.read(50))
#response = urlopen('http://www.debian.org')
#print(response.read())
##print(response.read())
##Status Code
#print(response.status)
#-------------------------------------
#custom request
#req = Request('http://www.debian.org')
#req.add_header('Accept-Language', 'sv')
#response = urlopen(req)
#print(response.readlines()[:5])
#----------------------------------------
#Content Compression
#with decompression cannot see data
#from urllib.request import Request
#from urllib.request import urlopen
#req = Request('http://www.debian.org')
#req.add_header('Accept-Encoding', 'gzip')
#response = urlopen(req)
#print(response.getheader('Content-Encoding'))
#print(response.read())
#With Decompression can view data
#from urllib.request import Request
#from urllib.request import urlopen
#import gzip
#req = Request('http://www.debian.org')
#req.add_header('Accept-Encoding', 'gzip')
#response = urlopen(req)
#content = gzip.decompress(response.read())
#result=content.splitlines()[:5]
#print(result)
#--------------------------------------
#Content Negotiation
#from urllib.request import urlopen
#import gzip
#req = Request('http://www.debian.org')
#req.add_header('Accept-Content-Type', 'text/plain')
#response = urlopen(req)
#content = response.read()
#result=content.splitlines()[:5]
#print(result)
#-------------------------------------------
#User Agent
#from urllib.request import Request
#from urllib.request import urlopen
#req = Request('http://www.debian.org')
#req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64;rv:24.0) Gecko/20140722 Firefox/24.0 Iceweasel/24.7.0')
#response = urlopen(req)
#print(response.readline())
#---------------------------------------------
#Cookie
#from http.cookiejar import CookieJar
#cookie_jar = CookieJar()
#from urllib.request import build_opener, HTTPCookieProcessor
#opener = build_opener(HTTPCookieProcessor(cookie_jar))
#opener.open('http://www.github.com')
#print(len(cookie_jar))
#cookies = list(cookie_jar)
#print(cookies)
#---------------------------------------------\
#Redirect
#from urllib.request import Request
#from urllib.request import urlopen
#req = Request('http://www.gmail.com')
#response = urlopen(req)
#print(response.url)
#print(req.redirect_dict)
#---------------------------------------
#HTTP Methods
#GET
import requests
response = requests.get('http://www.debian.org')
print(response.content)
print(response.status_code)
#POST
# import requests
# r = requests.post("http://bugs.python.org", data={'number': 12524, 'type': 'issue', 'action': 'show'})
# print(r.status_code, r.reason)
# print(r.text)
| [
"angch@tertiaryinfotech.com"
] | angch@tertiaryinfotech.com |
e223b08659d04f02b9ff57fd9cc627a0bfbc4420 | 63ba933a294865f65409635f62e0f1d59f725f37 | /src/arrays/bagOfTokensScore.py | 86ce1032d9eb0987f1da6b22e658f67679b0f34d | [
"CC0-1.0"
] | permissive | way2arun/datastructures_algorithms | fc4302bdbb923ef8912a4acf75a286f2b695de2a | 4ea4c1579c28308455be4dfa02bd45ebd88b2d0a | refs/heads/master | 2021-12-07T04:34:35.732026 | 2021-09-30T12:11:32 | 2021-09-30T12:11:32 | 203,658,808 | 1 | 0 | null | 2020-08-08T15:55:09 | 2019-08-21T20:23:46 | Python | UTF-8 | Python | false | false | 2,716 | py | """
Bag of Tokens
You have an initial power of P, an initial score of 0, and a bag of tokens where tokens[i] is the value of the ith token (0-indexed).
Your goal is to maximize your total score by potentially playing each token in one of two ways:
If your current power is at least tokens[i], you may play the ith token face up, losing tokens[i] power and gaining 1 score.
If your current score is at least 1, you may play the ith token face down, gaining tokens[i] power and losing 1 score.
Each token may be played at most once and in any order. You do not have to play all the tokens.
Return the largest possible score you can achieve after playing any number of tokens.
Example 1:
Input: tokens = [100], P = 50
Output: 0
Explanation: Playing the only token in the bag is impossible because you either have too little power or too little score.
Example 2:
Input: tokens = [100,200], P = 150
Output: 1
Explanation: Play the 0th token (100) face up, your power becomes 50 and score becomes 1.
There is no need to play the 1st token since you cannot play it face up to add to your score.
Example 3:
Input: tokens = [100,200,300,400], P = 200
Output: 2
Explanation: Play the tokens in this order to get a score of 2:
1. Play the 0th token (100) face up, your power becomes 100 and score becomes 1.
2. Play the 3rd token (400) face down, your power becomes 500 and score becomes 0.
3. Play the 1st token (200) face up, your power becomes 300 and score becomes 1.
4. Play the 2nd token (300) face up, your power becomes 0 and score becomes 2.
Constraints:
0 <= tokens.length <= 1000
0 <= tokens[i], P < 104
"""
from collections import deque
from typing import List
class Solution:
    def bagOfTokensScore(self, tokens: List[int], P: int) -> int:
        """Maximize the score with a greedy two-pointer strategy.

        Always buy score with the cheapest remaining token; when power
        runs short, cash in one score for the most valuable remaining
        token's power -- but only while more than two tokens remain,
        since trading down and immediately back up over the last pair
        nets nothing.
        """
        tokens.sort()
        if not tokens or P < tokens[0]:
            return 0
        lo, hi = 0, len(tokens) - 1
        score = 0
        while lo <= hi:
            if P >= tokens[lo]:
                # Face up: spend power on the cheapest token for +1 score.
                P -= tokens[lo]
                score += 1
                lo += 1
            elif hi - lo > 1:
                # Face down: convert one score into the largest token's power.
                P += tokens[hi]
                score -= 1
                hi -= 1
            else:
                break
        return score
# Main Call
tokens = [100, 200]
P = 150
solution = Solution()
print(solution.bagOfTokensScore(tokens, P))
| [
"way2aru@yahoo.com"
] | way2aru@yahoo.com |
a8b32038a3ade070c8f67b3eed0e66408c072e48 | 25d4c31d5ebe470118b14beb84f3cd1e53d99c15 | /01_Tutorials/PyQt5_GUI_Tutorial/09_2_Tutorial_Progressbar_Button.py | 195496bbd802cc5cf6756f04db46337e8a71d385 | [] | no_license | daltdoerfer/Python_Templates-1 | ea4b59489feb7b7617e81b7c94d4375dbf25def3 | c2471cebeaf20bbfdfd3fd263d458e5a67ad8d1e | refs/heads/master | 2023-05-10T15:07:10.109280 | 2021-06-08T06:45:53 | 2021-06-08T06:45:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | # Dieses Tutorial beinhaltet das einfügen von:
# Progressbar mit ButtonS und (Multi-)Threading (Programm muss weiterlaufen und lagert andere Prozesse aus)
# https://riptutorial.com/pyqt5/example/29500/basic-pyqt-progress-bar
import sys
import time
from PyQt5 import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
TIME_LIMIT = 100 # Ausgelagertes TIME Limit, da mehrere Klassen darauf zugreifen
class External(QThread):
    """
    Runs a counter thread.

    Emits countChanged once per second with the current count until
    TIME_LIMIT is reached, so the GUI thread can update the progress
    bar without blocking.
    """
    # Signal carrying the current counter value back to the GUI thread.
    countChanged = pyqtSignal(int)

    def run(self):
        count = 0
        while count < TIME_LIMIT:
            count += 1
            time.sleep(1)  # simulate one second of work per step
            self.countChanged.emit(count)
class Fenster(QDialog):  # note: inherit from QMainWindow instead if a status/menu bar is needed
    def __init__(self):
        super().__init__()
        self.initMe()

    def initMe(self):
        #################################
        # Progress bar
        #################################
        self.pb1 = QProgressBar(self)
        self.pb1.setGeometry(0, 0, 300, 25)
        self.pb1.move(50, 50)
        self.pb1.setMaximum(100)

        self.bt1 = QPushButton("Start", self)
        self.bt1.move(50, 75)
        self.bt1.clicked.connect(self.onButtonClick)

        #################################
        # General window configuration (main window)
        #################################
        self.setGeometry(50, 50, 1000, 500)
        self.setWindowTitle("My First GUI")
        self.setWindowIcon(QIcon("icon.png"))
        self.show()

    def onButtonClick(self):
        # Start the worker thread; keep a reference on self so the
        # thread object is not garbage-collected while running.
        self.calc = External()
        self.calc.countChanged.connect(self.onCountChanged)
        self.calc.start()

    def onCountChanged(self, value):
        # Slot: update the progress bar from the worker's signal.
        self.pb1.setValue(value)
if __name__ == "__main__":
    app = QApplication(sys.argv)  # create the default application
    w = Fenster()                 # build the window (new instance w)
    sys.exit(app.exec_())         # exit the script when the window is closed
"daltdoerfer@yahoo.com"
] | daltdoerfer@yahoo.com |
78b480c59e1129fef3f5117392043d5251f5e5cb | 7c551e749064b25af706b9167211050f8c6ad0a9 | /signatures/windows/trojan_rovnix.py | f9b6b29446060b6a111cd040ea82c6e53ff79178 | [] | no_license | dashjuvi/Cuckoo-Sandbox-vbox-win7 | fa382828b4895c5e1ee60b37a840edd395bf1588 | a3a26b539b06db15176deadeae46fc0476e78998 | refs/heads/master | 2020-03-12T08:33:06.231245 | 2019-01-14T23:09:02 | 2019-01-14T23:09:02 | 130,529,882 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Rovnix(Signature):
name = "rovnix"
description = "Rovnix Trojan"
severity = 3
categories = ["banker", "trojan"]
authors = ["Mikael Keri"]
minimum = "2.0"
files_re = [
".*\\\\AppData\\\\Local\\\\Temp\\\\L[0-9]{9}",
".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\RSA[0-9]{9}.dll",
".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\KEYS\\\\CFG[0-9]{9}.dll",
".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\KEYS\\\\DB[0-9]{9}.dll",
]
regkeys_re = [
".*\\\\Software\\\\Microsoft\\\\Installer\\\\Products\\\\B[0-9]{9}",
]
mutexes_re = [
".*UACNTFS[0-9]{9}",
".*INSNTFS[0-9]{9}",
".*BDNTFS[0-9]{9}",
".*PL6NTFS[0-9]{9}",
".*PL1NTFS[0-9]{9}",
]
def on_complete(self):
for indicator in self.mutexes_re:
for mutex in self.check_mutex(pattern=indicator, regex=True, all=True):
self.mark_ioc("mutex", mutex)
for indicator in self.regkeys_re:
for regkey in self.check_key(pattern=indicator, regex=True, all=True):
self.mark_ioc("registry", regkey)
for indicator in self.files_re:
for regkey in self.check_file(pattern=indicator, regex=True, all=True):
self.mark_ioc("file", regkey)
return self.has_marks()
| [
"diegovm14@gmail.com"
] | diegovm14@gmail.com |
e243451ce164809caa479471221ee886f2b8c8da | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/validators/choropleth/unselected/__init__.py | 6b386c7525f160cb5f23f28d158a37c663b847da | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 684 | py |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `choropleth.unselected.marker` compound property.

    Auto-generated plotly.py code: binds the property name and parent
    path to the `Marker` data class and its property documentation.
    """

    def __init__(
        self,
        plotly_name='marker',
        parent_name='choropleth.unselected',
        **kwargs
    ):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Marker'),
            data_docs=kwargs.pop(
                'data_docs', """
            opacity
                Sets the marker opacity of unselected points,
                applied only when a selection exists.
"""
            ),
            **kwargs
        )
| [
"noreply@github.com"
] | Jonathan-MW.noreply@github.com |
75b7140688bd7f5663275f7481f344ba0990f781 | 4e04f819e376c3fba7b6a57c228c289b2c3dde12 | /compass/ocean/tests/global_ocean/mesh/so12to60/dynamic_adjustment/__init__.py | c183fae208713987c10bf3bf3c959e87c5ac2da9 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | Rihui-L/compass | 65e88253f24240a4376a9f04c047c2756848a45a | 4446f76222be26996fc44569a2047bdfb22e33ff | refs/heads/master | 2023-06-19T12:45:30.190857 | 2021-07-20T19:48:43 | 2021-07-20T19:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,066 | py | from compass.ocean.tests.global_ocean.dynamic_adjustment import \
DynamicAdjustment
from compass.ocean.tests.global_ocean.forward import ForwardStep
class SO12to60DynamicAdjustment(DynamicAdjustment):
    """
    A test case performing dynamic adjustment (dissipating fast-moving waves)
    from an initial condition on the SO12to60 MPAS-Ocean mesh

    Attributes
    ----------
    restart_filenames : list of str
        A list of restart files from each dynamic-adjustment step
    """

    def __init__(self, test_group, mesh, init, time_integrator):
        """
        Create the test case

        Parameters
        ----------
        test_group : compass.ocean.tests.global_ocean.GlobalOcean
            The global ocean test group that this test case belongs to

        mesh : compass.ocean.tests.global_ocean.mesh.Mesh
            The test case that produces the mesh for this run

        init : compass.ocean.tests.global_ocean.init.Init
            The test case that produces the initial condition for this run

        time_integrator : {'split_explicit', 'RK4'}
            The time integrator to use for the forward run
        """
        if time_integrator != 'split_explicit':
            raise ValueError('{} dynamic adjustment not defined for {}'.format(
                mesh.mesh_name, time_integrator))

        # Each stage runs up to one of these times and writes a restart file;
        # the next stage resumes from that file.
        restart_times = ['0001-01-03_00:00:00', '0001-01-07_00:00:00',
                         '0001-01-11_00:00:00', '0001-01-21_00:00:00']
        restart_filenames = [
            'restarts/rst.{}.nc'.format(restart_time.replace(':', '.'))
            for restart_time in restart_times]

        super().__init__(test_group=test_group, mesh=mesh, init=init,
                         time_integrator=time_integrator,
                         restart_filenames=restart_filenames)

        module = self.__module__

        # first step: strongest Rayleigh damping and shortest time step
        step_name = 'damped_adjustment_1'
        step = ForwardStep(test_case=self, mesh=mesh, init=init,
                           time_integrator=time_integrator, name=step_name,
                           subdir=step_name)

        namelist_options = {
            'config_run_duration': "'00-00-02_00:00:00'",
            'config_dt': "'00:05:00'",
            'config_btr_dt': "'00:00:20'",
            'config_Rayleigh_friction': '.true.',
            'config_Rayleigh_damping_coeff': '1.0e-4'}
        step.add_namelist_options(namelist_options)

        stream_replacements = {
            'output_interval': '00-00-10_00:00:00',
            'restart_interval': '00-00-02_00:00:00'}
        step.add_streams_file(module, 'streams.template',
                              template_replacements=stream_replacements)

        step.add_output_file(filename='../{}'.format(restart_filenames[0]))
        self.add_step(step)

        # second step: weaker damping, longer time step, restarts from step 1
        step_name = 'damped_adjustment_2'
        step = ForwardStep(test_case=self, mesh=mesh, init=init,
                           time_integrator=time_integrator, name=step_name,
                           subdir=step_name)

        namelist_options = {
            'config_run_duration': "'00-00-04_00:00:00'",
            'config_dt': "'00:07:30'",
            'config_btr_dt': "'00:00:20'",
            'config_Rayleigh_friction': '.true.',
            'config_Rayleigh_damping_coeff': '4.0e-5',
            'config_do_restart': '.true.',
            'config_start_time': "'{}'".format(restart_times[0])}
        step.add_namelist_options(namelist_options)

        stream_replacements = {
            'output_interval': '00-00-10_00:00:00',
            'restart_interval': '00-00-02_00:00:00'}
        step.add_streams_file(module, 'streams.template',
                              template_replacements=stream_replacements)

        step.add_input_file(filename='../{}'.format(restart_filenames[0]))
        step.add_output_file(filename='../{}'.format(restart_filenames[1]))
        self.add_step(step)

        # third step: weakest damping, restarts from step 2
        step_name = 'damped_adjustment_3'
        step = ForwardStep(test_case=self, mesh=mesh, init=init,
                           time_integrator=time_integrator, name=step_name,
                           subdir=step_name)

        namelist_options = {
            'config_run_duration': "'00-00-04_00:00:00'",
            'config_dt': "'00:10:00'",
            'config_btr_dt': "'00:00:20'",
            'config_Rayleigh_friction': '.true.',
            'config_Rayleigh_damping_coeff': '1.0e-5',
            'config_do_restart': '.true.',
            'config_start_time': "'{}'".format(restart_times[1])}
        step.add_namelist_options(namelist_options)

        stream_replacements = {
            'output_interval': '00-00-10_00:00:00',
            'restart_interval': '00-00-02_00:00:00'}
        step.add_streams_file(module, 'streams.template',
                              template_replacements=stream_replacements)

        step.add_input_file(filename='../{}'.format(restart_filenames[1]))
        step.add_output_file(filename='../{}'.format(restart_filenames[2]))
        self.add_step(step)

        # final step: no damping, a plain 10-day simulation from step 3
        step_name = 'simulation'
        step = ForwardStep(test_case=self, mesh=mesh, init=init,
                           time_integrator=time_integrator, name=step_name,
                           subdir=step_name)

        namelist_options = {
            'config_run_duration': "'00-00-10_00:00:00'",
            'config_do_restart': '.true.',
            'config_start_time': "'{}'".format(restart_times[2])}
        step.add_namelist_options(namelist_options)

        stream_replacements = {
            'output_interval': '00-00-10_00:00:00',
            'restart_interval': '00-00-10_00:00:00'}
        step.add_streams_file(module, 'streams.template',
                              template_replacements=stream_replacements)

        step.add_input_file(filename='../{}'.format(restart_filenames[2]))
        step.add_output_file(filename='../{}'.format(restart_filenames[3]))
        self.add_step(step)

        self.restart_filenames = restart_filenames
| [
"xylarstorm@gmail.com"
] | xylarstorm@gmail.com |
06dec5bffda4f9bce976bfa3abf34ab323768695 | c29de7ce2d91f572aeb4da56801de7a1dc034054 | /st2/experiments/cifar10/exp011.py | f2fd3d516a219c49e1c585326c4a98eaf1043f51 | [] | no_license | kzky/works | 18b8d754bfc2b1da22022926d882dfe92ea785e6 | b8708c305e52f924ea5a7071e0dfe5f2feb7a0a3 | refs/heads/master | 2021-01-10T08:04:44.831232 | 2018-03-01T15:09:47 | 2018-03-01T15:09:47 | 54,316,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla
from nnabla.contrib.context import extension_context
import numpy as np
import os
import time
import argparse
from st2.cifar10.cnn_model_011 import cnn_model_003, ce_loss, sr_loss, er_loss, \
GradScaleContainer
from st2.cifar10.datasets import Cifar10DataReader, Separator
"""
The same script as the `st` module but with nnabla.
- ConvPool-CNN-C (Springenberg et al., 2014, Salimans&Kingma (2016))
- Stochastic Regularization
- Entropy Regularization for the outputs before CE loss and SR loss
- Gradient scaling: just consider large gradients of g_u
"""
def categorical_error(pred, label):
    """Return the fraction of rows in `pred` whose argmax differs from `label`.

    Both arguments are numpy.ndarrays: `pred` holds per-class scores
    (one row per sample) and `label` holds the integer class ids in any
    shape that flattens row-wise.
    """
    predicted_classes = pred.argmax(1)
    mismatches = predicted_classes != label.flat
    return mismatches.mean()
def main(args):
    """Train a semi-supervised CIFAR-10 model with stochastic regularization.

    Builds supervised (CE + entropy) and unsupervised (SR + entropy)
    loss graphs over the same CNN, trains with Adam while rescaling the
    unsupervised gradients via GradScaleContainer, and prints test
    accuracy once per epoch.
    """
    # Settings
    device_id = args.device_id
    batch_size = 100
    batch_size_eval = 100
    n_l_train_data = 4000
    n_train_data = 50000
    n_cls = 10
    learning_rate = 1. * 1e-3
    n_epoch = 300
    act = F.relu  # NOTE(review): unused in this function
    # NOTE(review): under Python 3 this division yields a float, so
    # range(n_iter) below would raise TypeError -- use // if porting.
    iter_epoch = n_train_data / batch_size
    n_iter = n_epoch * iter_epoch
    extension_module = args.context

    # Model
    ## supervised: cross-entropy + entropy regularization on labeled data
    batch_size, m, h, w = batch_size, 3, 32, 32
    ctx = extension_context(extension_module, device_id=device_id)
    x_l = nn.Variable((batch_size, m, h, w))
    y_l = nn.Variable((batch_size, 1))
    pred = cnn_model_003(ctx, x_l)
    loss_ce = ce_loss(ctx, pred, y_l)
    loss_er = er_loss(ctx, pred)
    loss_supervised = loss_ce + loss_er

    ## stochastic regularization: two stochastic passes over the same
    ## unlabeled batch are encouraged to agree
    x_u0 = nn.Variable((batch_size, m, h, w))
    x_u1 = nn.Variable((batch_size, m, h, w))
    pred_x_u0 = cnn_model_003(ctx, x_u0)
    pred_x_u1 = cnn_model_003(ctx, x_u1)
    loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
    loss_er0 = er_loss(ctx, pred_x_u0)
    loss_er1 = er_loss(ctx, pred_x_u1)
    loss_unsupervised = loss_sr + loss_er0 + loss_er1

    ## evaluate: separate graph in test mode (no stochastic layers)
    batch_size_eval, m, h, w = batch_size, 3, 32, 32
    x_eval = nn.Variable((batch_size_eval, m, h, w))
    pred_eval = cnn_model_003(ctx, x_eval, test=True)

    # Solver
    with nn.context_scope(ctx):
        solver = S.Adam(alpha=learning_rate)
        solver.set_parameters(nn.get_parameters())

    # Gradient Scale Container: rescales unsupervised gradients before
    # the supervised backward pass accumulates on top of them
    gsc = GradScaleContainer(len(nn.get_parameters()))

    # Dataset
    ## separate dataset into labeled/unlabeled splits on disk
    home = os.environ.get("HOME")
    fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    separator = Separator(n_l_train_data)
    separator.separate_then_save(fpath)

    l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
    u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
    test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")

    # data reader
    data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
                                    batch_size=batch_size,
                                    n_cls=n_cls,
                                    da=True, #TODO: use F.image_augmentation
                                    shape=True)

    # Training loop
    print("# Training loop")
    epoch = 1
    st = time.time()
    acc_prev = 0.
    for i in range(n_iter):
        # Get data and set it to the varaibles
        x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
        x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
        x_l.d, _ , y_l.d= x_l0_data, x_l1_data, y_l_data
        x_u0.d, x_u1.d= x_u0_data, x_u1_data

        # Train: unsupervised gradients are computed first, rescaled,
        # then supervised gradients accumulate before the update
        loss_supervised.forward(clear_no_need_grad=True)
        loss_unsupervised.forward(clear_no_need_grad=True)
        solver.zero_grad()
        loss_unsupervised.backward(clear_buffer=True)
        gsc.scale_grad(ctx, nn.get_parameters())
        loss_supervised.backward(clear_buffer=True)
        ## update
        solver.update()

        # Evaluate once per epoch
        if (i+1) % iter_epoch == 0:
            # Get data and set it to the varaibles
            x_data, y_data = data_reader.get_test_batch()

            # Evaluation loop over the test set in eval-sized batches
            ve = 0.
            iter_val = 0
            for k in range(0, len(x_data), batch_size_eval):
                x_eval.d = x_data[k:k+batch_size_eval, :]
                label = y_data[k:k+batch_size_eval, :]
                pred_eval.forward(clear_buffer=True)
                ve += categorical_error(pred_eval.d, label)
                iter_val += 1

            msg = "Epoch:{},ElapsedTime:{},Acc:{:02f}".format(
                epoch,
                time.time() - st,
                (1. - ve / iter_val) * 100)
            print(msg)
            st = time.time()
            epoch +=1
if __name__ == '__main__':
    # Command-line interface: GPU/device id and nnabla extension context.
    parser = argparse.ArgumentParser()
    parser.add_argument("--device_id", "-d", type=int, default=0)
    parser.add_argument('--context', '-c', type=str,
                        default="cpu", help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
    args = parser.parse_args()
    main(args)
| [
"rkzfilter@gmail.com"
] | rkzfilter@gmail.com |
fb4055ed05dc497e1fbf506797c4d8371e6725f6 | bd053d2bf5444ab8f0b8b0ff56772fa75281e38d | /qchem/tests/test_observable.py | 09b74d615188f94b288e790d8fe1d3a885eb13cd | [
"Apache-2.0"
] | permissive | johannesjmeyer/pennylane | bcb762583e95537b04a9b38756369571f957d2e5 | 8f602312baea107d5248267fb3dc1593722810e0 | refs/heads/master | 2023-07-11T18:21:31.086858 | 2021-08-14T19:21:42 | 2021-08-14T19:21:42 | 341,190,636 | 3 | 1 | Apache-2.0 | 2021-06-16T09:01:58 | 2021-02-22T12:19:10 | Python | UTF-8 | Python | false | false | 4,359 | py | import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion import FermionOperator, QubitOperator
# Toy one-body (t) and two-body (v, v1, v2) fermionic operators used to
# build the many-body observables exercised by the tests below.
t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", -0.5)

v = (
    FermionOperator("0^ 0^ 0 0", 0.25)
    + FermionOperator("0^ 1^ 1 0", -0.25)
    + FermionOperator("1^ 0^ 0 1", -0.5)
)

# Two-body operator acting on four spin-orbitals.
v1 = (
    FermionOperator("0^ 0^ 0 0", 0.25)
    + FermionOperator("0^ 1^ 1 0", -0.25)
    + FermionOperator("0^ 2^ 2 0", 0.25)
    + FermionOperator("0^ 3^ 3 0", -0.25)
    + FermionOperator("1^ 0^ 0 1", -0.25)
    + FermionOperator("2^ 0^ 0 2", 0.25)
)

# Variant of v1 with different coefficients.
v2 = (
    FermionOperator("0^ 0^ 0 0", 0.5)
    + FermionOperator("0^ 1^ 1 0", -0.25)
    + FermionOperator("0^ 2^ 2 0", 0.5)
    + FermionOperator("0^ 3^ 3 0", -0.25)
    + FermionOperator("1^ 0^ 0 1", -0.25)
    + FermionOperator("2^ 0^ 0 2", -0.25)
)
@pytest.mark.parametrize(
("fermion_ops", "init_term", "mapping", "terms_exp"),
[
(
[t, v],
1 / 4,
"bravyi_KITAEV",
{
(): (0.0625 + 0j),
((0, "Z"),): (-0.0625 + 0j),
((0, "Z"), (1, "Z")): (0.4375 + 0j),
((1, "Z"),): (-0.1875 + 0j),
},
),
(
[t, v],
1 / 4,
"JORDAN_wigner",
{
(): (0.0625 + 0j),
((0, "Z"),): (-0.0625 + 0j),
((1, "Z"),): (0.4375 + 0j),
((0, "Z"), (1, "Z")): (-0.1875 + 0j),
},
),
(
[t],
1 / 2,
"JORDAN_wigner",
{(): (0.5 + 0j), ((0, "Z"),): (-0.25 + 0j), ((1, "Z"),): (0.25 + 0j)},
),
(
[t],
0,
"JORDAN_wigner",
{((0, "Z"),): (-0.25 + 0j), ((1, "Z"),): (0.25 + 0j)},
),
(
[v1],
1 / 2,
"JORDAN_wigner",
{
(): (0.4375 + 0j),
((1, "Z"),): (0.125 + 0j),
((0, "Z"), (1, "Z")): (-0.125 + 0j),
((2, "Z"),): (-0.125 + 0j),
((0, "Z"), (2, "Z")): (0.125 + 0j),
((0, "Z"),): (0.0625 + 0j),
((3, "Z"),): (0.0625 + 0j),
((0, "Z"), (3, "Z")): (-0.0625 + 0j),
},
),
(
[v2],
1 / 4,
"bravyi_KITAEV",
{
(): (0.125 + 0j),
((0, "Z"), (1, "Z")): (0.125 + 0j),
((1, "Z"),): (-0.125 + 0j),
((2, "Z"),): (-0.0625 + 0j),
((0, "Z"), (2, "Z")): (0.0625 + 0j),
((1, "Z"), (2, "Z"), (3, "Z")): (0.0625 + 0j),
((0, "Z"), (1, "Z"), (2, "Z"), (3, "Z")): (-0.0625 + 0j),
((0, "Z"),): (0.125 + 0j),
},
),
],
)
def test_observable(fermion_ops, init_term, mapping, terms_exp, custom_wires, monkeypatch):
    r"""Tests the correctness of the 'observable' function used to build many-body observables.
    The parametrized inputs `terms_exp` are the `.terms` attribute of the corresponding
    `QubitOperator`. The equality checking is implemented in the `qchem` module itself
    as it could be something useful to the users as well.
    """
    # Build the observable under test from the fermionic operators.
    res_obs = qchem.observable(
        fermion_ops, init_term=init_term, mapping=mapping, wires=custom_wires
    )
    # Construct the reference operator by injecting the expected terms
    # directly into an empty QubitOperator.
    qubit_op = QubitOperator()
    monkeypatch.setattr(qubit_op, "terms", terms_exp)
    assert qchem._qubit_operators_equivalent(qubit_op, res_obs, wires=custom_wires)
# Expected error messages raised by qchem.observable for invalid inputs;
# matched via pytest.raises(..., match=...) in the test below.
msg1 = "Elements in the lists are expected to be of type 'FermionOperator'"
msg2 = "Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'"
@pytest.mark.parametrize(
("fermion_ops", "mapping", "msg_match"),
[
([FermionOperator("0^ 0", 0.5), "notFermionOperator"], "JORDAN_wigner", msg1),
([FermionOperator("0^ 0", 0.5)], "no_valid_transformation", msg2),
],
)
def test_exceptions_observable(fermion_ops, mapping, msg_match):
    """Test that the 'observable' function throws an exception if any element
    in the list 'fermion_ops' is not a FermionOperator object or if the
    fermionic-to-qubit transformation is not properly defined."""
    # Both invalid inputs are reported as TypeError with a descriptive message.
    with pytest.raises(TypeError, match=msg_match):
        qchem.observable(fermion_ops, mapping=mapping)
| [
"noreply@github.com"
] | johannesjmeyer.noreply@github.com |
c94b2c053a007e87154dc677ea8df2d8d6db02e4 | e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551 | /uri/uri_python/iniciante/p1038.py | e994b8738d0a17022bb596d11005b2a23996e826 | [] | no_license | GabrielEstevam/icpc_contest_training | b8d97184ace8a0e13e1c0bf442baa36c853a6837 | 012796c2ceb901cf7aa25d44a93614696a7d9c58 | refs/heads/master | 2020-04-24T06:15:16.826669 | 2019-10-08T23:13:15 | 2019-10-08T23:13:15 | 171,758,893 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | valor = input().split(" ")
# Parse the product code and quantity already read into `valor`.
codigo, quantidade = int(valor[0]), int(valor[1])
# Unit prices indexed by product code 1..5.
tabela_precos = [4, 4.5, 5, 2, 1.5]
total = quantidade * tabela_precos[codigo - 1]
print("Total: R$ %.2f" % total)
| [
"gabrielestevam@hotmail.com"
] | gabrielestevam@hotmail.com |
98e5bb02b2f1e5c29f9b110dae3b25cd10b004f1 | d75703c2083dfc508c5608c4c35167b67d1a4308 | /2nd Chapter/graphTwo.py | 13107201655ba9be1fc0423142010b1927106346 | [] | no_license | vubon/Python-core | e8159763d281152a1b64da3a0534899fd3def2b5 | a415ef3c6159f0c85afa3240a762a00b2c68bd02 | refs/heads/master | 2020-07-03T17:08:10.091827 | 2016-12-09T19:26:51 | 2016-12-09T19:26:51 | 67,540,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import networkx as nx
# Build a triangle graph A-B-C and print its nodes and edges.
G = nx.Graph()
G.add_node("A")
G.add_node("B")
# Bug fix: the original called G.add_none("C"), which is not a networkx
# method and raised AttributeError before the third node was ever added.
G.add_node("C")
G.add_edge("A", "B")
G.add_edge("B", "C")
G.add_edge("C", "A")
print("Nodes: " + str(G.nodes()))
# Bug fix: Graph has no callable `edge`; `edges()` returns the edge list.
print("Edges: " + str(G.edges()))
| [
"vubon.roy@gmail.com"
] | vubon.roy@gmail.com |
354596a7e215dbda43d8b2a0e5becc1707e1fa44 | e3946d91dc5fe71989c2f4b6390232865fcb5d1b | /fjord/flags/tests/test_tasks.py | cc907a579b61d1e71e3621331f63e2dfa138d835 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | zeusintuivo/fjord | 61b632fd6df0e1b3508e628fe4f682a937cc0244 | 3bd227004d369df1fdc39f06acff12ebc8f0fe34 | refs/heads/master | 2021-01-16T18:28:52.564638 | 2014-09-24T21:02:51 | 2014-09-24T21:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | from mock import patch
from nose.tools import eq_
# These tests require that tasks be imported so that the post_save
# signal is connected. Don't remove this.
import fjord.flags.tasks # noqa
from fjord.base.tests import TestCase
from fjord.feedback.tests import ResponseFactory
from fjord.flags.spicedham_utils import get_spicedham, tokenize
class TestClassifyTask(TestCase):
    """Tests for the classification task triggered by saving a response."""

    def test_classify_task(self):
        """A positive classification should attach an 'abuse' flag."""
        with patch('fjord.flags.tasks.classify') as mock_classify:
            mock_classify.return_value = True

            # Saving the response fires the post_save signal, which runs
            # the classifier task; it should mark the response as abuse.
            response = ResponseFactory(locale=u'en-US', description=u'ou812')

            eq_(mock_classify.call_count, 1)
            flag_names = sorted(f.name for f in response.flag_set.all())
            eq_(flag_names, ['abuse'])

    def test_classify_false_task(self):
        """A negative classification should attach no flags at all."""
        with patch('fjord.flags.tasks.classify') as mock_classify:
            mock_classify.return_value = False

            # The task still runs on save, but no flag should be created.
            response = ResponseFactory(locale=u'en-US', description=u'ou812')

            eq_(mock_classify.call_count, 1)
            eq_([f.name for f in response.flag_set.all()], [])

    def test_ignore_non_english(self):
        """Responses whose locale is not en-US are never classified."""
        with patch('fjord.flags.tasks.classify') as mock_classify:
            # Non en-US locale: classify should never be invoked.
            response = ResponseFactory(locale=u'es', description=u'ou812')

            eq_(mock_classify.called, False)
            eq_([f.name for f in response.flag_set.all()], [])
class TestClassification(TestCase):
    """End-to-end check of the spicedham-backed abuse classifier."""

    def train(self, descriptions, is_abuse=True):
        """Feed each description to the classifier as labelled training data."""
        # Note: get_spicedham() may return a cached Spicedham instance.
        classifier = get_spicedham()
        for description in descriptions:
            classifier.train(tokenize(description), match=is_abuse)

    def test_abuse(self):
        # Train on a handful of abusive examples...
        self.train([
            'gross gross is gross gross gross browser',
            'gross icky gross gross browser',
            'gross is mcgrossy gross',
            'omg worst gross',
            'browser worst'
        ], is_abuse=True)
        # ...and a handful of benign ones.
        self.train([
            'Firefox is super!',
            'Great browser!',
            'Super fast!',
            'Not gross!',
            'super not gross!'
        ], is_abuse=False)

        # Saving the response triggers the classifier task; the trained
        # model should now flag this description as abuse.
        response = ResponseFactory(
            locale=u'en-US', description=u'browser is gross!')
        eq_(sorted(f.name for f in response.flag_set.all()), ['abuse'])
| [
"willkg@mozilla.com"
] | willkg@mozilla.com |
ff0180f0924a802c747f04609234a645c5b90d6f | 69c33fcad69a2e61cc60209401215530d033e712 | /Python/Python Basics/32.opr.py | dae93683c4b9860393bb38c99d87a73107461c88 | [] | no_license | KULDEEPMALIKM41/Practices | 7659b895ea959c7df2cdbc79c0b982b36f2bde63 | 193abe262ff281a384aac7895bb66dc39ee6e88d | refs/heads/master | 2023-08-17T11:01:11.694282 | 2021-09-30T08:12:41 | 2021-09-30T08:12:41 | 289,527,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # 4.Logical Operator => and , or, not
# and or not
# A B A and B | A B A or B | A not(A)
# F F F F F F T F
# F T F F T T
# T F F T F T F T
# T T T T T T
# Demonstration of Python's logical operators: `and`, `or` and `not`.
a, b, c = 10, 20, 5
outcome = b > a and b > c  # True: both comparisons hold
print('result : ', outcome)

a, b, c = 10, 5, 15
outcome = a > b and a > c  # False: `and` needs both sides True
print('Result and = ', outcome)

outcome = a > b or a > c  # True: `or` needs only one side True
print('Result or = ', outcome)

outcome = not (a > b) and not (a > c)  # False and True -> False
print('Result not = ', outcome)

outcome = not (a > b and a > c)  # not False -> True
print('Result and,not = ', outcome)
"Kuldeepmalikm41@gmail.com"
] | Kuldeepmalikm41@gmail.com |
42c2877963f0980cf4683bb135d63c3593ccd77c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch3_2020_04_10_19_31_46_547518.py | f2b28c9beb403cc065d37ef9eac91b50609cac83 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import math
def calcula_gaussiana(x, mi, sigma):
    """Return the Gaussian (normal) probability density at x.

    Parameters:
        x: point at which to evaluate the density.
        mi: mean of the distribution.
        sigma: standard deviation (must be non-zero).
    """
    # Normalization factor: sigma * sqrt(2*pi).
    a = sigma * ((2 * math.pi) ** (1 / 2))
    # Exponent: -0.5 * ((x - mi) / sigma)^2.
    b = -0.5 * ((x - mi) / sigma) ** 2
    # Bug fix: math.exp is a function and must be called -- the original
    # `math.exp**b` applied ** to the function object, raising TypeError.
    gaussiana = (1 / a) * math.exp(b)
    return gaussiana
"you@example.com"
] | you@example.com |
459af06cd809435cbcaf4c1ecd35e0e3e713e427 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2981/60712/256536.py | 0d28d89f2d62c8f4b6a26f95305cd6fb99293d9d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py |
import re
# p holds three option flags read from the first input line; they control
# case, repetition count and direction of the expansion below.
p =list(map(int,input().split()))
# Split the second line on '-' while keeping each dash as its own element.
s=re.split(r"([-])",input())
for i in range(len(s)):
    if s[i]=='-':
        # The range endpoints are the character just before the dash and
        # the character just after it.
        pre = list(s[i-1])
        pres =0
        post=list(s[i+1])
        posts=0
        # Classify each endpoint: 1 = lowercase letter, 2 = anything else
        # (presumably digits -- TODO confirm against the problem statement).
        if 'a'<=pre[len(pre)-1]<='z':
            pres=1
        else:
            pres=2
        if 'a'<=post[0]<='z':
            posts=1
        else:
            posts=2
        # Only expand when both endpoints share a class and are strictly
        # increasing.
        if pres==posts and pre[len(pre)-1]<post[0]:
            preascii=ord(pre[len(pre)-1])
            postascii = ord(post[0])
            if postascii - preascii>1:
                s2=""
                start=0
                end=0
                x=0
                # p[2] == 2 reverses the direction of the generated run.
                if p[2]!=2:
                    start = 1
                    end = postascii-preascii
                    x=1
                else:
                    start=postascii-preascii-1
                    end=0
                    x=-1
                for j in range(start,end,x):
                    # p[1] is how many times each character is repeated.
                    for k in range(p[1]):
                        # p[0] selects the output style: 2 = uppercase
                        # (letters only), 3 = mask with '*', else verbatim.
                        if p[0]==2 and pres==1:
                            s2=s2+chr(preascii+j).upper()
                        elif p[0]==3:
                            s2=s2+'*'
                        else:
                            s2=s2+chr(preascii+j)
                    s[i]=s2
            else:
                # Adjacent endpoints: nothing lies between, drop the dash.
                s[i]=''
print("".join(s))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
4e23d1b44a12c5b9a789d8eec13f30ebec60eef8 | 750c45da795fe15d7ef40d09660742b650631967 | /snippet_builder/settings.py | 7b0c8a5ce9b385cd0ef5bb27441918dd22218181 | [] | no_license | John-W-Stevens/django_snippet_builder | cd984b69b3499136c4757bbae11d3f4701ef132e | 58d2ecd2432d1c288969cffdd6de5c0ad546306e | refs/heads/master | 2022-11-12T06:48:35.642128 | 2020-07-06T17:18:49 | 2020-07-06T17:18:49 | 258,379,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | """
Django settings for snippet_builder project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any real deployment.
SECRET_KEY = 'd$h3%k&!&3hm^^#katx-5g+&mw=i)pm=0@(ot&ow9fga(uk$_#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'snippet_builder_app',  # the project's own app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'snippet_builder.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'snippet_builder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Development database: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"john.william.stevens1@gmail.com"
] | john.william.stevens1@gmail.com |
a0857f8022a9592f8bee88c3e97a5859915ed831 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/Scaleform/daapi/settings/__init__.py | 9aa47f81067051a2e6f0d8d3d3e359503585fce2 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 494 | py | # 2016.11.19 19:49:21 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/settings/__init__.py
class BUTTON_LINKAGES(object):
    """Scaleform linkage names for the client's button skins."""
    BUTTON_BLACK = 'ButtonBlack'
    BUTTON_RED = 'ButtonRed'
    BUTTON_NORMAL = 'ButtonNormal'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\Scaleform\daapi\settings\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:49:21 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
6796605bf711e664aab04f2e9738a8154d274c11 | 5851bfab6684e49c808bcc724437a601107d17a8 | /cnova_api_lojista_v2/model/TicketStatus.py | daf055a860a0629849121960142851503970bdd2 | [] | no_license | ballke-dev/ViaVarejoSDK | 18e3224a9d1faebfa00803dd41d4e70fe392e51e | 90875423e4a5382faac926036de3cbc243a5c97f | refs/heads/master | 2020-04-17T17:57:12.034404 | 2019-01-21T11:55:24 | 2019-01-21T11:55:24 | 166,805,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | #!/usr/bin/env python
class TicketStatus:
    """Swagger model describing a ticket-status update payload."""

    def __init__(self):
        # Maps attribute name -> swagger type of that attribute.
        self.swaggerTypes = {'ticket_status': 'str'}
        # Maps pythonic attribute name -> JSON field name.
        self.attributeMap = {'ticket_status': 'ticketStatus'}
        # Desired new status of the ticket: closed ("closed") or in
        # attendance ("attendance").
        self.ticket_status = None  # str
| [
"ti2@ballke.com.br"
] | ti2@ballke.com.br |
642d2eb59544d36fa4596d933f4e433abb98af6d | 3fc3c2707a4618f81cc308a15abeea11c3d0101e | /neural_network.py | 2b4d75fd9475c6b50031f7e57050c11e451422b3 | [] | no_license | Louis-Saglio/Connect4 | 734fdfcf8c68f452db03f7f91827a02a8ae9049a | bbf7538ebd03c9b3be996b915546951cde15d209 | refs/heads/master | 2023-07-15T09:27:31.423144 | 2021-09-04T19:35:05 | 2021-09-04T19:35:05 | 385,692,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from __future__ import annotations
import random
from typing import Iterable
import numpy as np
def sigmoid(array):
    """Element-wise logistic sigmoid: 1 / (1 + e^(-x))."""
    exp_neg = np.exp(-array)
    return 1 / (1 + exp_neg)
def relu(array):
    """Element-wise rectified linear unit: max(x, 0)."""
    return np.maximum(array, 0)
class NeuralNetwork:
    """A toy fully-connected network with randomly initialized layers.

    Each layer is a dict holding a weight matrix of shape
    (layer_size, input_size), a bias vector and an activation callable.
    """

    def __init__(self, input_size: int, layers_size: Iterable[int]):
        self.layers = []
        fan_in = input_size
        for size in layers_size:
            # Weights are drawn before biases so the stream of np.random
            # draws matches the original construction order exactly.
            layer = {
                "weights": np.random.random((size, fan_in)),
                "bias": np.random.random(size),
                "activation": relu,
            }
            self.layers.append(layer)
            fan_in = size

    def feedforward(self, input_data) -> np.ndarray:
        """Propagate input_data through all layers; output is normalized to sum to 1."""
        activations = input_data
        for layer in self.layers:
            pre_activation = np.dot(layer["weights"], activations) + layer["bias"]
            activations = layer["activation"](pre_activation)
        return activations / np.sum(activations)

    def clone(self) -> NeuralNetwork:
        """Return an independent copy (weight and bias arrays are copied)."""
        duplicate = NeuralNetwork(0, [])
        duplicate.layers = [
            {
                "weights": layer["weights"].copy(),
                "bias": layer["bias"].copy(),
                "activation": layer["activation"],
            }
            for layer in self.layers
        ]
        return duplicate

    def mutate(self):
        """Perturb one randomly chosen weight by a uniform value in [-5, 5)."""
        layer = random.choice(range(0, len(self.layers)))
        neuron = random.choice(range(0, len(self.layers[layer]["weights"])))
        weight = random.choice(range(0, len(self.layers[layer]["weights"][neuron])))
        self.layers[layer]["weights"][neuron][weight] += (random.random() - 0.5) * 10
if __name__ == '__main__':
    # Smoke test: build a small network, clone it, then mutate the original.
    nn = NeuralNetwork(input_size=100, layers_size=[50, 25, 10])
    # NOTE(review): `data` and `clone` are created but never used beyond
    # this point -- presumably a leftover from manual experimentation.
    data = np.random.random(100)
    clone = nn.clone()
    nn.mutate()
| [
"louis.saglio@sfr.fr"
] | louis.saglio@sfr.fr |
2aad9c9d2778ce0c1f3d99e883d37d9ca996fc08 | f1cb02057956e12c352a8df4ad935d56cb2426d5 | /LeetCode/1928. Minimum Cost to Reach Destination in Time/Solution.py | cdd9f72047c03732266a7e77c97893623d41598b | [] | no_license | nhatsmrt/AlgorithmPractice | 191a6d816d98342d723e2ab740e9a7ac7beac4ac | f27ba208b97ed2d92b4c059848cc60f6b90ce75e | refs/heads/master | 2023-06-10T18:28:45.876046 | 2023-05-26T07:46:42 | 2023-05-26T07:47:10 | 147,932,664 | 15 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | class Solution:
def minCost(self, maxTime: int, edges: List[List[int]], passingFees: List[int]) -> int:
# Time Complexity: O(V' + E')
# where V' = maxTime x V, and E' = maxTime x E
# Space Complexity: O(V')
# Construct an augmented graph, whose nodes are (node, time)
# and if there is an edge in the original graph from node1 to node2 with travel time t
# then there are edges in the augmented graph from (node1, time) to (node2, time - t)
# (if time >= t)
# The augmented graph is now a DAG, and this problem becomes a DP on DAG problem
adj_lists = {i: set() for i in range(len(passingFees))}
for start, end, time in edges:
adj_lists[start].add((end, time))
adj_lists[end].add((start, time))
return self.getCost(maxTime, 0, {}, adj_lists, passingFees)
def getCost(self, remain: int, node: int, dp, adj_lists, passingFees: List[int]) -> int:
if (remain, node) in dp:
return dp[(remain, node)]
if node == len(passingFees) - 1:
return passingFees[-1]
if remain == 0:
return -1
ret = -1
for neigh, time in adj_lists.get(node, []):
if remain >= time:
cand_cost = self.getCost(remain - time, neigh, dp, adj_lists, passingFees)
if cand_cost >= 0 and (ret == -1 or cand_cost < ret):
ret = cand_cost
if ret >= 0:
ret += passingFees[node]
dp[(remain, node)] = ret
return ret
| [
"nphamcs@gmail.com"
] | nphamcs@gmail.com |
bcf5a5f0b3af1dc80a764845106316ccaa7392fc | 3939c1fc17fc5ad77b28c3da3b18ac3aeafc0fa8 | /neighbour/migrations/0009_auto_20210726_2340.py | f38682d79eed9cc3ad4caba45fb83916b383eb65 | [
"MIT"
] | permissive | ObadiaH123/neighbour | 1bca1ba7e9ecbc330f4c8b9337b05bdba1b0e1da | e30085236ddb2048f751400805784241eec44d9f | refs/heads/master | 2023-06-27T19:08:46.054615 | 2021-07-26T22:34:30 | 2021-07-26T22:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # Generated by Django 3.1.7 on 2021-07-26 20:40
import cloudinary.models
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration: creates the Emergency model and adds
    a Cloudinary-backed image field to the existing healthcenter model."""

    dependencies = [
        ('neighbour', '0008_auto_20210726_2221'),
    ]
    operations = [
        # New Emergency table: auto-increment PK, optional name, image.
        migrations.CreateModel(
            name='Emergency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, null=True)),
                ('image', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
            ],
        ),
        # Add an image column to healthcenter; timezone.now is used only to
        # backfill existing rows (preserve_default=False drops the default
        # from the model afterwards).
        migrations.AddField(
            model_name='healthcenter',
            name='image',
            field=cloudinary.models.CloudinaryField(default=django.utils.timezone.now, max_length=255, verbose_name='image'),
            preserve_default=False,
        ),
    ]
| [
"ronohkelvin99@gmail.com"
] | ronohkelvin99@gmail.com |
0db13be2b7ae900ae80f4762afe3fbd131182e3b | d4a4b42fc7ce9f88f241f884d1b8f9f227c92b33 | /examples/neq/loopunreach300/loopunreach300_1.py | da08d79d547395ba0c9a2859fa093f5647795141 | [] | no_license | Client-Specific-Equivalence-Checker/CLEVER | a5cabcc6a127c80b7e8193f885bca8e5cf64b204 | 44a9027be67dcd94e0b4a30eb3cb5e7aeb0ab163 | refs/heads/master | 2022-02-13T04:23:29.654278 | 2019-02-06T17:10:48 | 2019-02-06T17:10:48 | 114,670,863 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def lib(a, b):
    c = 0
    # NOTE(review): this loop is deliberately dead code -- when a < 0 the
    # condition `i <= a` is False on entry (i starts at 1), so c is never
    # incremented and the function always returns 0.  This matches the
    # "unreachable loop" equivalence-checking benchmark this file belongs
    # to, so it must not be "fixed".
    if a < 0:
        i = 1
        while i <= a:
            c += b
            i += 1
    return c
def loopunreach300(x):
    """Forward x to lib with the constant 300 when x lies in [273, 327)."""
    if 273 <= x < 327:
        return lib(x, 300)
    return 0
| [
"fmorarocha@gmail.com"
] | fmorarocha@gmail.com |
c1c6117e9c201aada7c6b6791f1d3dbfd252238e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /N5JhvabK6DTD5t6gS_15.py | f6baaf716744a9bad75f70f01fe4407df3c0b5a2 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
import string
def markdown(symb):
    """Return a function that wraps every occurrence of a word in `symb`.

    The returned function takes (sentence, word) and wraps each
    space-separated token whose lowercased, punctuation-stripped form
    equals `word` (case-insensitively), keeping the token's original
    punctuation inside the wrapping symbols.
    """
    strip_punct = str.maketrans('', '', string.punctuation)

    def func(sentence, word):
        target = word.lower()
        pieces = []
        for token in sentence.split(' '):
            if token.lower().translate(strip_punct) == target:
                pieces.append(symb + token + symb)
            else:
                pieces.append(token)
        return ' '.join(pieces)

    return func
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
2a06d8169969719f56f6c0c63ed1ca5648bc7854 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02959/s028217014.py | 14c06dba3326e2507be8d625653369abe7bef9af | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | N = int(input())
# A has N+1 entries and B has N entries (N is read on the line above).
# Greedy sweep: each B[i] is spent first on A[i+1]'s leftover, then on
# A[i]; `ans` accumulates the total amount consumed.
A = list(map(int, input().split()))
B = list(map(int, input().split()))
ans = 0
for i in range(N+1):
    if i >= 1:
        # Use what remains of B[i-1] against A[i].
        p = min(A[i], B[i-1])
        ans += p
        A[i] -= p
    if i < N:
        # Use B[i] against A[i]; the remainder of B[i] carries to A[i+1].
        p = min(A[i], B[i])
        ans += p
        B[i] -= p
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2c80a63e8968899fc9de36d41cef0107e0562572 | ee4db47ccecd23559b3b6f3fce1822c9e5982a56 | /Build Chatbots/ChunkVerbPhrase.py | a1dfee73cfa4abcf2db55bb45550a1293288388d | [] | no_license | meoclark/Data-Science-DropBox | d51e5da75569626affc89fdcca1975bed15422fd | 5f365cedc8d0a780abeb4e595cd0d90113a75d9d | refs/heads/master | 2022-10-30T08:43:22.502408 | 2020-06-16T19:45:05 | 2020-06-16T19:45:05 | 265,558,242 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from nltk import RegexpParser
from pos_tagged_oz import pos_tagged_oz
from vp_chunk_counter import vp_chunk_counter
# Verb-phrase chunk grammar: a verb (VB.*), an optional determiner, any
# number of adjectives, a noun, then an optional adverb.
chunk_grammar = "VP: {<VB.*><DT>?<JJ>*<NN><RB.?>?}"
#chunk_grammar = "VP: {<DT>?<JJ>*<NN><VB.*><RB.?>?}"
# Parser that extracts chunks matching the grammar from POS-tagged sentences.
chunk_parser = RegexpParser(chunk_grammar)
# Accumulates one chunked parse tree per sentence.
vp_chunked_oz = list()
# create for loop through each pos-tagged sentence in pos_tagged_oz here
for pos_tagged_sentence in pos_tagged_oz:
# chunk each sentence and append to vp_chunked_oz here
vp_chunked_oz.append(chunk_parser.parse(pos_tagged_sentence))
# store and print the most common vp-chunks here
most_common_vp_chunks = vp_chunk_counter(vp_chunked_oz)
print(most_common_vp_chunks) | [
"oluchukwuegbo@gmail.com"
] | oluchukwuegbo@gmail.com |
0e4aaae67303557ecd576180c2a28859058ec15e | 0cb38adedbe3a5192076de420e1aa0fd10ae3311 | /returned_items/urls.py | 6100088eab96c5170acd4668bd1525d0d7c18808 | [] | no_license | fogcitymarathoner/rma | 73ada816b98f068b6c00b2e1fcf39461259453fa | 133d6026f99820d0702f0578b8a3b4574671f888 | refs/heads/master | 2021-01-11T00:32:47.797673 | 2016-10-10T18:34:54 | 2016-10-10T18:35:11 | 70,516,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from django.conf.urls import patterns, include, url
from returned_items.views import index
from returned_items.views import move_items
from returned_items.views import ReturnedItemCreateView
from returned_items.views import ReturnedItemUpdateView
from returned_items.views import ReturnedItemDeleteView
from returned_items.views import move_items_confirm
from django.contrib.auth.decorators import login_required
# URL routes for the returned-items app.  Create/edit/delete class-based
# views are wrapped in login_required; the list and move views are left
# open here (NOTE(review): verify whether they enforce access control
# themselves).
urlpatterns = patterns('',
                       url(r'^$', index, name='list_returned_items'),
                       url(r'move_items$', move_items, name='move_returned_items'),
                       url(r'move_items_confirm$', move_items_confirm, name='confirm_move_items'),
                       url(r'create/(?P<id>\d+)$', login_required(ReturnedItemCreateView.as_view()), name='create_returned_item'),
                       url(r'edit/(?P<id>\d+)$', login_required(ReturnedItemUpdateView.as_view()), name='edit_returned_item'),
                       url(r'delete/(?P<id>\d+)$', login_required(ReturnedItemDeleteView.as_view()), name='delete_returned_item'),
)
| [
"marc@fogtest.com"
] | marc@fogtest.com |
4ecdb5f970f3ac775afb8cb4bbf3db8350538c59 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_tb.py | d43de208d660ddec2bbd491b4d2a8ecbfce158ae | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py |
#calss header
class _TB():
def __init__(self,):
self.name = "TB"
self.definitions = [u'abbreviation for tuberculosis ', u'written abbreviation for terabyte ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
fd357530dfd0ab6b2300482b496fcc90edc6ae81 | 1491bc7c6c1a8e025f84e7ceaeb389f109b3d37c | /Eapp/modals/product.py | e04b28b97bcb9f6ce8df74429d25969063f27c9c | [] | no_license | Taraltinu/Second-project | 47516dbce9255e0b0a9452accce178e7a2a9ec55 | e052457afb1559b572331f5e62840f78d5b07b07 | refs/heads/master | 2022-12-29T18:15:19.251268 | 2020-10-02T17:40:20 | 2020-10-02T17:40:20 | 300,688,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | from django.db import models
from django.contrib.auth.models import User
from Eapp.modals.category import CategoryModel
class ProductModel(models.Model):
    """A product listed for sale by a seller (User)."""

    # Owning seller; deleting the user removes their products.
    seller = models.ForeignKey(User,on_delete=models.CASCADE)
    product_name = models.CharField(max_length=250)
    # Product category; deleting the category removes its products.
    product_Cat = models.ForeignKey(CategoryModel,on_delete=models.CASCADE)
    # NOTE(review): prices are stored as strings; a DecimalField would allow
    # numeric comparison/aggregation -- confirm before changing.
    product_price = models.CharField(max_length=10)
    sale_price = models.CharField(max_length=10)
    # Uploads are grouped into dated subdirectories (year/month/day).
    product_image = models.ImageField(upload_to="product/%y/%m/%d")
    description = models.TextField()
    quality = models.CharField(max_length=250,default="")
    size = models.FloatField( default=0)
    color = models.CharField(max_length=50,default="")
    # Set once when the row is first created.
    add_date = models.DateTimeField(auto_now_add=True,null=True,blank=True)
| [
"tinu1316@gmail.com"
] | tinu1316@gmail.com |
6abdc35a733f4947b3ff36352ec09f4ccde3faeb | a1cbf221a6befed3891d75c69e2a546effd2499d | /payroll/models.py | 12882e648d086a944362c8df582726c35acbbf23 | [] | no_license | Coder339/V-django-newCRM | 9a93efbb252ba814241076ece17088af8dd15935 | 2182266204f54d301b7c087a99627d441e00fe54 | refs/heads/master | 2022-12-24T15:12:47.081949 | 2020-08-24T12:15:13 | 2020-08-24T12:15:13 | 247,274,031 | 0 | 2 | null | 2022-12-08T04:19:35 | 2020-03-14T12:39:13 | Python | UTF-8 | Python | false | false | 2,281 | py | from django.db import models
from authentication.models import EmployeeProfile
class EmployeePackage(models.Model): # to be send
    """Salary package recorded for an employee: amount paid, payment
    date/mode and the leave allowances attached to the package."""

    # months = (
    #     ('JAN','JAN'),('FEB','FEB'),('MAR','MAR'),('APR','APR'),
    #     ('MAY','MAY'),('JUN','JUN'),('JULY','JULY'),('AUG','AUG'),
    #     ('SEP','SEP'),('OCT','OCT'),('NOV','NOV'),('DEC','DEC'),
    # )
    Name = models.CharField(max_length=20,null=True)
    empId = models.ForeignKey(EmployeeProfile,on_delete=models.CASCADE,null=True,default = 1)
    packageId = models.CharField(max_length=20,null=True)
    # packageId = models.ForeignKey(SalaryPackage,on_delete=models.CASCADE,null=True,editable=False)
    salary = models.IntegerField() # paid_amount
    # salaryMonth = models.CharField(max_length=20,choices=months,null=True)
    dateOfPayment = models.DateField(null=True)
    modeOfPayment = models.CharField(max_length=10)
    unpaid_leaves_allowed = models.PositiveIntegerField()
    paid_leaves_allowed = models.PositiveIntegerField()
    comments = models.CharField(max_length=100,null=True)
    def __str__(self):
        # NOTE(review): Name is nullable, so this can return None, which
        # makes Django raise TypeError when rendering the object -- verify.
        return self.Name
    class Meta:
        verbose_name_plural = 'employeeSalary'
class MonthlySalary(models.Model): #dynamic
    """One month's computed salary entry for an employee."""

    # userId = models.CharField(max_length=20, primary_key=True)
    EmpId = models.ForeignKey(EmployeeProfile,on_delete=models.CASCADE,null=True)
    salaryMonth = models.DateField(null=True)
    # Package this payment was computed from; not editable in admin forms.
    salaryId = models.ForeignKey(EmployeePackage, on_delete=models.CASCADE,editable=False,null=True)
    unpaid_leaves = models.PositiveIntegerField(null=True)
    paid_leaves = models.PositiveIntegerField(null=True)
    activeDays = models.PositiveIntegerField()
    workingDays = models.PositiveIntegerField()
    # paymentReceipt = models.ForeignKey(UserPaymentReceipt, on_delete=models.CASCADE)
    total_Salary_Amount = models.PositiveIntegerField() # according to no. of days spent
    def __str__(self):
        # Bug fix: __str__ must return a str.  The original returned the
        # related EmployeeProfile instance itself, which makes Django raise
        # "TypeError: __str__ returned non-string" whenever the object is
        # displayed (e.g. in the admin).  Coerce it explicitly instead.
        return str(self.EmpId)
    class Meta:
        verbose_name_plural = 'monthlySalary'
| [
"amanpreet.leanvia@gmail.com"
] | amanpreet.leanvia@gmail.com |
9d52bc51d884bd0bd422bf9fc7be75aa01e5af19 | 1538320b5419539879c76f923206753fc0746b4a | /proteus/tests/SWFlows/dam3Bumps.py | dc0732ed3e744a0559ff73677e586e6efa738a5a | [
"MIT"
] | permissive | dloney/proteus | 829169228221e2ca8bffad2c518d8d858da6af48 | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | refs/heads/master | 2020-06-30T00:05:53.816553 | 2019-08-01T07:41:15 | 2019-08-01T07:41:15 | 200,662,475 | 0 | 0 | MIT | 2019-08-05T13:38:18 | 2019-08-05T13:38:17 | null | UTF-8 | Python | false | false | 3,481 | py | from __future__ import division
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
from proteus.mprans import SW2D
from proteus.mprans import SW2DCV
from proteus.Domain import RectangularDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
import proteus.SWFlows.SWFlowProblem as SWFlowProblem
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
# Run-time options; each entry is (name, default, help-text).
opts= Context.Options([
    ('sw_model',0,"sw_model = {0,1} for {SWEs,DSWEs}"),
    ("final_time",3.0,"Final time for simulation"),
    ("dt_output",1.0,"Time interval to output solution"),
    ("refinement",2,"Level of refinement"),
    ("cfl",0.33,"Desired CFL restriction"),
    ("reflecting_BCs",True,"Use reflecting BCs")
])
###################
# DOMAIN AND MESH #
###################
# Rectangular basin of 75 m x 30 m.
L=(75.0,30.0)
refinement = opts.refinement
domain = RectangularDomain(L=L)
# CREATE REFINEMENT #
# Structured mesh: nnx x nny nodes; each refinement level doubles the
# node count per direction, and he is the resulting element size.
nnx0=6
nnx = (nnx0-1)*(2**refinement)+1
nny = old_div((nnx-1),2)+1
he = old_div(L[0],float(nnx-1))
triangleOptions="pAq30Dena%f" % (0.5*he**2,)
######################
##### BATHYMETRY #####
######################
# NOTE(review): h0, a, B, p and s are not referenced elsewhere in this
# file; they look like leftovers from a related analytic benchmark.
h0=10
a=3000
B=5
k=0.002
g = SWFlowProblem.default_physical_parameters['gravity']
p = old_div(np.sqrt(8*g*h0),a)
s = old_div(np.sqrt(p**2 - k**2),2.)
# Manning friction placeholder (overridden to 0.02 at the end of the file).
mannings = k
def bathymetry_function(X):
    """Bottom elevation at point X = (x, y, ...): three conical bumps,
    clipped below at z = 0."""
    x, y = X[0], X[1]
    # Cones centered at (30, 6), (30, 24) and (47.5, 15) with peak
    # heights 1, 1 and 3 respectively.
    cone_a = 1 - 1. / 8 * np.sqrt((x - 30) ** 2 + (y - 6) ** 2)
    cone_b = 1 - 1. / 8 * np.sqrt((x - 30) ** 2 + (y - 24) ** 2)
    cone_c = 3 - 3. / 10 * np.sqrt((x - 47.5) ** 2 + (y - 15) ** 2)
    height = np.maximum(0., cone_a)
    height = np.maximum(height, cone_b)
    return np.maximum(height, cone_c)
##############################
##### INITIAL CONDITIONS #####
##############################
class water_height_at_t0(object):
    """Initial water depth: a reservoir held behind x = 16, dry elsewhere."""

    def uOfXT(self, X, t):
        # Free-surface elevation: 1.875 inside the reservoir (x <= 16),
        # zero elsewhere.
        eta = 1.875 if X[0] <= 16 else 0.
        # Depth is surface elevation minus bathymetry, clipped at zero.
        return max(eta - bathymetry_function(X), 0.)
class Zero(object):
    """Constant zero field, used for the initial momenta."""

    def uOfXT(self, x, t):
        return 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(opts.final_time,dt_output=opts.dt_output)
# Dam-break initial state: water held behind x = 16, fluid initially at rest.
initialConditions = {'water_height': water_height_at_t0(),
                     'x_mom': Zero(),
                     'y_mom': Zero()}
# Unspecified boundary conditions; walls come from the reflectingBCs flag.
boundaryConditions = {'water_height': lambda x,flag: None,
                      'x_mom': lambda x,flag: None,
                      'y_mom': lambda x,flag: None}
# NOTE(review): sw_model and cfl are hard-coded here even though the
# Context options above expose opts.sw_model and opts.cfl -- verify
# whether they were meant to be passed through.
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=0,
                                              cfl=0.33,
                                              outputStepping=outputStepping,
                                              structured=True,
                                              he=he,
                                              nnx=nnx,
                                              nny=nny,
                                              domain=domain,
                                              initialConditions=initialConditions,
                                              boundaryConditions=boundaryConditions,
                                              reflectingBCs=opts.reflecting_BCs,
                                              bathymetry=bathymetry_function)
mySWFlowProblem.physical_parameters['LINEAR_FRICTION']=0
mySWFlowProblem.physical_parameters['mannings']=0.02
| [
"cekees@gmail.com"
] | cekees@gmail.com |
05c338ef5fbde0852cfd563177a583f42c08bcd4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/308/85637/submittedfiles/testes.py | b124f9a58f1dbe095c14a4e1a353c59ee692bba3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Print the greeting exactly n times.
n = int(input("Digite a quantidade de vezes: "))
# Bug fix: range(0, n + 1, 1) iterated n + 1 times; range(n) prints n times,
# matching the prompt ("quantidade de vezes").
for _ in range(n):
    print('Olá mundo')
# Disabled tic-tac-toe sketch kept for reference.  Bug fix: the original
# triple-quoted string was never closed (a SyntaxError); it is closed here.
"""
visual = [[' ',' ', ' '], [' ', ' ',' '], [' ', ' ', ' ']]
for i in range(0, 10, 1):
    a = str(input('Selecione a posição: '))
    if i%2==0:
        visual[int(a[0])][int(a[2])]='X'
    else:
        visual[int(a[0])][int(a[2])]='O'
for i in range (0, 3, 1):
    print(str(visual[i][0]) + ' | '+ str(visual[i][1]) + ' | '+ str(visual[i][2]))
"""
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
cc2cdf4e5ff6349c49bcbc52e970a773bbc84e63 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/NISS/shamil_v3/fuel_management/wizard/fuel_slice_report.py | 65ec4386e868bb9ae0da2e2aba4d26c188ce0b93 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,407 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2016-2017 NCTR (<http://www.nctr.sd>).
#
##############################################################################
from osv import fields, osv
import time
from datetime import datetime,date,timedelta
from tools.translate import _
class vehicle_report_wiz(osv.osv_memory):
    """ Transient (memory-only) wizard that collects the filter criteria
    for the fuel slice report and launches the report action. """
    _name = "fuel.slice.report.wiz"
    _description = "Fuel Slice Report Wizard"
    def _selection_year(self, cr, uid, context=None):
        """
        Build the vehicle model-year selection between 1970 and the
        current year, newest first.
        @return: list of (value, label) string tuples
        """
        return [(str(years), str(years)) for years in range(int(datetime.now().year) + 1, 1970, -1)]
    # Wizard fields (OpenERP 6-style _columns declaration).
    _columns = {
        'date_from': fields.date('Date From'),
        'date_to': fields.date('Date To'),
        'process_type': fields.selection([('modify','Modify'),('insert','Insert')],'Process Type'),
        'department_id': fields.many2one('hr.department',string='Department'),
        'category_id': fields.many2one('vehicle.category',string='Vehicle Category'),
        'year': fields.selection(_selection_year, 'Model'),
        'included_department': fields.boolean('Includes sub-departments'),
        'company_id': fields.many2one('res.company', 'Company'),
    }
    _defaults = {
        # Default company: the company of the user running the wizard.
        'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, context=c).company_id.id,
        'included_department': False,
    }
    def check_date(self, cr, uid, ids, context=None):
        """
        Constraint: the start date must be before or equal to the end date.
        Dates are ISO-formatted strings, so string comparison is safe here.
        @return: True when valid; raises except_osv otherwise
        """
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.date_from > rec.date_to:
                raise osv.except_osv(_('ERROR'), _('The Start Date Must Be Before or Equal To the End Date'))
        return True
    _constraints = [
        (check_date, '', []),
    ]
    def print_report(self, cr, uid, ids, context=None):
        """
        Launch the fuel slice report with the wizard's current values.
        @return: ir.actions.report.xml action dictionary
        """
        datas = {}
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'vehicle.fuel.slice',
            'form': data
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'fuel_slice_report',
            'datas':datas,
        }
        # Dead code below: unreachable after the return, kept for reference.
        #if data['total_report'] == True:
        '''if data['report_type'] in ['total_report']:
            return {
                'type': 'ir.actions.report.xml',
                'report_name': 'total_vehicle_report',
                'datas':datas,
            }
        elif data['report_type'] in ['total_number_report']:
            return {
                'type': 'ir.actions.report.xml',
                'report_name': 'total_vehicle_number_report',
                'datas':datas,
            }
        else:
            return {
                'type': 'ir.actions.report.xml',
                'report_name': 'vehicle_report',
                'datas':datas,
            }'''
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
94b322e6f3fc89092bd1e3c38f205837a8b9d53b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_sundowns.py | e5bbcdb49d823bb48b71f7c94f658c3e6ef988cd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
# class header
class _SUNDOWNS():
    """Generated word-form record for "sundowns" (plural of "sundown")."""

    def __init__(self):
        self.name = "SUNDOWNS"
        # Bug fix: `sundown` was an unquoted bare name, raising NameError on
        # construction; presumably the base-form string was intended -- TODO
        # confirm against the generator that produced these files.
        self.definitions = 'sundown'
        self.parents = []
        self.childen = []  # sic: attribute name kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.basic = ['sundown']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a9cb150fe24e6d478a7062b79a9926b1c2c792b8 | bd6fd6bb82bf3179a4571c7a2ca3a030f5684c5c | /mundo3-EstruturasCompostas/081-ExtraindoDadosDeUmaLista.py | bad61e35e4d14143637f0d9408833e3179835979 | [
"MIT"
] | permissive | jonasht/CursoEmVideo-CursoDePython3 | b3e70cea1df9f33f409c4c680761abe5e7b9e739 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | refs/heads/master | 2023-08-27T12:12:38.103023 | 2021-10-29T19:05:01 | 2021-10-29T19:05:01 | 276,724,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | lista = []
print('type s to exit\ndigite s para sair')
while 1:
num = input('N: ')
if num == 's': break
else: lista.append(int(num))
lista.reverse()
print('lista reversa', lista)
print('foram digitados', len(lista), ' numeros')
print('numero 5 foi digitado' if 5 in lista else 'sem 5')
#Exercício Python 081:
# Crie um programa que vai ler vários números e colocar em uma lista.
# Depois disso, mostre:
#A) Quantos números foram digitados.
#B) A lista de valores, ordenada de forma decrescente.
#C) Se o valor 5 foi digitado e está ou não na lista. | [
"jhenriquet@outlook.com.br"
] | jhenriquet@outlook.com.br |
1e3ed3f3fd8d5b2c94d100304c32df7f9ac83452 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/72/usersdata/212/39304/submittedfiles/tomadas.py | ef757877e9f3f17521a58f298b71fb444b7e3dab | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CODIGO AQUI
t1 = int(input('digite o número de tomadas da régua do integrante 1:'))
t2 = int(input('digite o número de tomadas da régua do integrante 2:'))
t3 = int(input('digite o número de tomadas da régua do integrante 3:'))
t4 = int(input('digite o número de tomadas da régua do integrante 4:'))
# Daisy-chaining the strips costs one outlet per strip except the last one.
nt = (t1 - 1) + (t2 - 1) + (t3 - 1) + t4
# Bug fix: the total was computed but never shown to the user.
print(nt)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1024a326cae1b15ef82188bdaf3d59809f4f0394 | 77353aa80cefff9856c423acdb1313f6f7239bc4 | /dictionary/dict_count_item.py | d5935917c6b69b866c300d20b63c95f6c0688023 | [] | no_license | upasek/python-learning | ed21bc555bd684fbb432d852a274dc5a8fff38de | 026c73fe8369254bffb3f78cfd80fb152648cffa | refs/heads/master | 2023-03-18T19:34:11.297607 | 2021-03-12T17:51:54 | 2021-03-12T17:51:54 | 284,996,974 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #Write a Python program to count number of items in a dictionary value that is a list.
# Count the total number of items across all list values of a dictionary.
# Renamed the variable `dict` -> `grades`: it shadowed the dict builtin.
grades = {'Alex': ['subj1', 'subj2', 'subj3'], 'David': ['subj1', 'subj2', 'subj3']}
print("Original dictionary :", grades)
# Sum the lengths of every value list in a single pass.
count = sum(len(values) for values in grades.values())
print("Number of items in a dictionary value that is a list is :", count)
| [
"kvsupase@gmail.com"
] | kvsupase@gmail.com |
5e6c0294c8f9f716e5347736ce9e9ba02b6e07b6 | 09e7c3aab7cd34c6caf701ec7224581f68c246b0 | /zkmanager/filters.py | 2743c148ad9fe1cd62398f3656f2e839414f9f73 | [] | no_license | itimor/kaoqin | d383430b29b67152469cf652690aa1ad4fd3c4eb | 8113f393c5375295494890a5d17fea2d47b30599 | refs/heads/master | 2021-04-15T03:49:19.965242 | 2018-05-03T05:38:24 | 2018-05-03T05:38:24 | 126,454,042 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # -*- coding: utf-8 -*-
# author: itimor
from .models import Punch
from django_filters import rest_framework as filters
from django_filters import DateFromToRangeFilter
class PunchFilter(filters.FilterSet):
    """Filter punches by a creation-date range and by the punching user."""
    # Exposes ?create_date_after=...&create_date_before=... query params.
    create_date = DateFromToRangeFilter()
    class Meta:
        model = Punch
        # user_id__username allows filtering through the related user record.
        fields = ['create_date', 'user_id__username']
| [
"kevin@126.com"
] | kevin@126.com |
7bd962e4114a78c5aa9d3f87534c875261886917 | 13f33343e701fbfb4306c6835c24877e81dba12e | /backend/epic_kidz_3889/settings.py | 0e8f599bee2d8fae95582265c65cfb7a1d4d5a77 | [] | no_license | crowdbotics-apps/epic-kidz-3889 | 386f8b944b2c31438a6e5ae277c866ac0eb87921 | 64ced56bcffe1fa0e7d4d17de7b60e26ad1a7f91 | refs/heads/master | 2022-12-12T21:07:15.985176 | 2019-05-27T02:47:13 | 2019-05-27T02:47:13 | 188,760,034 | 0 | 0 | null | 2022-12-03T11:08:16 | 2019-05-27T02:47:10 | JavaScript | UTF-8 | Python | false | false | 4,752 | py | """
Django settings for epic_kidz_3889 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# Environment variables are read from a .env file at the project root.
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
# NOTE(review): '*' accepts any Host header; tighten for production.
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # WhiteNoise serves the collected static files directly from Django.
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'epic_kidz_3889.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'epic_kidz_3889.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Local-development default; overridden below when DATABASE_URL is set.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'epic_kidz_3889',
        'USER': 'epic_kidz_3889',
        'PASSWORD': 'epic_kidz_3889',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
if env.str('DATABASE_URL', default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth: log in with email instead of username.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
# NOTE(review): django-allauth documents the string values
# 'none'/'optional'/'mandatory' for this setting, not the None object --
# confirm the intended behavior.
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings (optional per-deployment overrides).
try:
    from .local_settings import *
    INSTALLED_APPS += DEBUG_APPS
except ImportError:
    # Bug fix: the previous bare `except:` also swallowed NameError and
    # SyntaxError from a broken local_settings module; only a missing
    # module should be ignored silently.
    pass
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
db04a848e4b84dbd17930a7c2f34b562f45e428c | b13a1a96e9f1dddb3a3a44b636ca939b85962899 | /LevelFive/template_project/app_template/views.py | 76972ad28a08916481e475c5e6e8f27f5d09afed | [] | no_license | jspw/Django-Test | f266331c73c34b83b1189811a163567b6b4cc60b | 13a6d0146c9c78f8fa03c269e4546b5bbdb146bd | refs/heads/master | 2021-03-23T17:50:21.764636 | 2020-10-18T09:21:23 | 2020-10-18T09:21:23 | 247,472,132 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | from django.shortcuts import render
from app_template.forms import UserProfileInfoForm,UserForm
from django.core import validators
from django import forms
#
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponseRedirect,HttpResponse
# from django.core.urlresolvers import reverse #django 2 removes urlresolvers
from django.urls import reverse
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
    """Render the landing page with a small greeting context."""
    payload = {'text': "Hello world!"}
    return render(request, 'app_template/index.html', payload)
@login_required
def special(request):
    # Minimal authenticated-only page proving the login_required gate works.
    return HttpResponse("You are loggedin , Nice!")
@login_required
def user_logout(request):
    """Log the current user out and redirect back to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse('index'))
def basic(request):
    # Static demo page for the template-inheritance example.
    return render(request,'app_template/basic.html')
def other(request):
    # Second static demo page for the template-inheritance example.
    return render(request,'app_template/other.html')
def relateive_template(request):
    # NOTE(review): the function name is misspelled ("relateive") but is
    # kept because the URLconf may reference it by this exact name.
    return render(request, 'app_template/relative_url_template.html')
def signupform(request):
    """Handle user sign-up: GET renders empty forms, POST validates and
    saves a User plus its linked UserProfileInfo (with optional avatar)."""
    registered = False
    if request.method == "POST":
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileInfoForm(data=request.POST)
        # Only persist anything when BOTH forms validate.
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            user.set_password(user.password) #hashing the password
            user.save()
            # commit=False: attach the user FK before the profile is saved.
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'profile_pic' in request.FILES:
                profile.profile_pic = request.FILES['profile_pic']
            profile.save()
            registered = True
        else :
            # NOTE(review): errors go to stdout; consider logging instead.
            print(user_form.errors,profile_form.errors)
    else :
        user_form = UserForm()
        profile_form = UserProfileInfoForm()
    # Invalid POSTs fall through here with bound forms carrying their errors.
    return render(
        request,
        'app_template/signup.html',
        {
            'user_form':user_form,
            'profile_form':profile_form,
            'registered':registered,
        }
    )
def user_login(request):
    """Authenticate a user from the login form.

    GET renders the login page; POST validates the credentials and, on
    success, starts a session and redirects to the index page.
    """
    if request.method != 'POST':
        return render(request, 'app_template/login.html')
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if user:
        if user.is_active:
            login(request, user)
            return HttpResponseRedirect(reverse('index'))
        return HttpResponse("Account is not Active")
    # Security fix: never log credentials -- the previous code printed the
    # plaintext password to the console on every failed attempt.
    print("Failed login attempt for username: {}".format(username))
    return HttpResponse("Invalid login detailed supplied")
| [
"mhshifat757@gmail.com"
] | mhshifat757@gmail.com |
66a1617fd944f84ba67cfff2a6a9a9b743131465 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/util/state/FunctionAnalyzer.pyi | 8ef7e534299b278b57e27b3297e6d55cfed74262 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,284 | pyi | from typing import List
import ghidra.program.model.address
import ghidra.program.model.pcode
import ghidra.program.model.symbol
import ghidra.util.state
import ghidra.util.task
import java.lang
class FunctionAnalyzer(object):
    """Auto-generated type stub for Ghidra's FunctionAnalyzer callback
    interface: implementations receive data/stack/flow reference events
    discovered while a function's pcode is analyzed.

    NOTE(review): this stub uses `overload`, `unicode` and `long` without
    importing/defining them; Ghidra's stub generator apparently assumes
    the stub consumer provides them -- confirm before executing this file.
    """
    def dataReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, storageVarnode: ghidra.program.model.pcode.Varnode, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
        """
        Callback indicating that an absolute memory reference was encountered
        @param op pcode operation
        @param instrOpIndex opIndex associated with reference or -1 if it could not be determined
        @param storageVarnode absolute storage Varnode
        @param refType read/write/data reference type
        @param monitor task monitor
        @throws CancelledException if callback canceled by monitor
        """
        ...
    # The following are the standard java.lang.Object methods exposed by
    # the Ghidra/Jython bridge.
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def hashCode(self) -> int: ...
    def indirectDataReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, offsetVarnode: ghidra.program.model.pcode.Varnode, size: int, storageSpaceID: int, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
        """
        Callback indicating that an indirect/computed memory reference was encountered using an indirect/computed offset
        @param op pcode operation
        @param instrOpIndex opIndex associated with reference or -1 if it could not be determined
        @param offsetVarnode indirect/computed offset
        @param size access size or -1 if not applicable
        @param storageSpaceID storage space ID
        @param refType read/write/data reference type
        @param monitor task monitor
        @throws CancelledException if callback canceled by monitor
        """
        ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def resolvedFlow(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, destAddr: ghidra.program.model.address.Address, currentState: ghidra.util.state.ContextState, results: ghidra.util.state.ResultsState, monitor: ghidra.util.task.TaskMonitor) -> bool:
        """
        Callback indicating that a call/branch destination was identified.
        Analyzer should create reference if appropriate
        Keep in mind that there could be other unidentified destinations.
        @param op branch or call flow operation
        @param instrOpIndex opIndex associated with reference or -1 if it could not be determined
        @param destAddr destination address
        @param results contains previous states leading upto the currentState
        @param currentState current state at the branch/call
        @param monitor task monitor
        @return true if destination should be disassembled if not already
        @throws CancelledException if callback canceled by monitor
        """
        ...
    @overload
    def stackReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, stackOffset: int, size: int, storageSpaceID: int, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
        """
        Callback indicating that an absolute stack reference was encountered. A non-load/store
        operation will have a -1 for both storageSpaceId and size.
        @param op pcode operation
        @param instrOpIndex opIndex associated with reference or -1 if it could not be determined
        @param stackOffset stack offset
        @param size access size or -1 if not applicable
        @param storageSpaceID storage space ID or -1 if not applicable
        @param refType read/write/data reference type
        @param monitor task monitor
        @throws CancelledException if callback canceled by monitor
        """
        ...
    @overload
    def stackReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, computedStackOffset: ghidra.util.state.VarnodeOperation, size: int, storageSpaceID: int, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
        """
        Callback indicating that a computed stack reference was encountered. A non-load/store
        operation will have a -1 for both storageSpaceId and size.
        @param op pcode operation
        @param instrOpIndex opIndex associated with reference or -1 if it could not be determined
        @param computedStackOffset stack offset computation (i.e., VarnodeOperation w/ stack pointer)
        @param size access size or -1 if not applicable
        @param storageSpaceID storage space ID or -1 if not applicable
        @param refType read/write/data reference type
        @param monitor task monitor
        @throws CancelledException if callback canceled by monitor
        """
        ...
    def toString(self) -> unicode: ...
    def unresolvedIndirectFlow(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, destination: ghidra.program.model.pcode.Varnode, currentState: ghidra.util.state.ContextState, results: ghidra.util.state.ResultsState, monitor: ghidra.util.task.TaskMonitor) -> List[ghidra.program.model.address.Address]:
        """
        Callback indicating that a computed call/branch destination was not resolved.
        @param op indirect branch or call flow operation
        @param instrOpIndex opIndex associated with reference or -1 if it could not be determined
        @param destination destination identified as a Varnode (may be an expression represented by
        a {@link VarnodeOperation}
        @param results contains previous states leading upto the currentState
        @param currentState current state at the branch/call
        @param monitor task monitor
        @return list of resolved destinations which should be used or null. List of destination
        addresses will trigger disassembly where necessary.
        @throws CancelledException if callback cancelled by monitor
        """
        ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
3fb9d6f478528789a6f211aea81aac01dd9a6fe1 | b7eb41b068614e04f38a969326f43d8f8119cb05 | /897__increasing_order_search_tree.py | 82e08117e36c06d7d355bd81c0c774907a48e697 | [] | no_license | YI-DING/daily-leetcode | ddfb6985bf5014886cba8d6219da243e0aa28d71 | a6d3898d900f2063302dc1ffc3dafd61eefa79b7 | refs/heads/master | 2020-05-19T06:07:21.557077 | 2019-07-19T16:31:46 | 2019-07-19T16:31:46 | 184,866,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def increasingBST(self, root, tail=None):
        """Flatten a BST in place into an increasing right-only chain
        (LeetCode 897) and return its new head.

        In-order recursion: the flattened left subtree ends at `root`,
        and the flattened right subtree ends at `tail` (the node that
        should follow this whole subtree).  O(n) time, O(h) stack.

        Review fix: the class previously defined `increasingBST` twice;
        the first (debug-print) version was dead code shadowed by the
        second, so only the clean recursive version is kept.
        """
        if not root:
            return tail
        head = self.increasingBST(root.left, root)
        root.left = None
        root.right = self.increasingBST(root.right, tail)
        return head
"yiding1@uchicago.edu"
] | yiding1@uchicago.edu |
05d317851bc0a3a46cc148bd399a725b7cd60215 | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/neutron/plugins/ml2/ovo_rpc.py | ec7d84d2665cd4773f8a00eb74734e919975206b | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 6,363 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
import eventlet
from oslo_concurrency import lockutils
from oslo_log import log as logging
from neutron._i18n import _LE
from neutron.api.rpc.callbacks import events as rpc_events
from neutron.api.rpc.handlers import resources_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron import context as n_ctx
from neutron.db import api as db_api
from neutron.objects import network
from neutron.objects import ports
from neutron.objects import securitygroup
from neutron.objects import subnet
LOG = logging.getLogger(__name__)
class _ObjectChangeHandler(object):
    """Listens to AFTER_CREATE/UPDATE/DELETE registry callbacks for one
    resource type and pushes the object's latest state to agents via the
    resources push RPC API."""
    def __init__(self, resource, object_class, resource_push_api):
        self._resource = resource
        self._obj_class = object_class
        self._resource_push_api = resource_push_api
        # Map of resource_id -> serialized request context; acts as a
        # de-duplication buffer (later events overwrite earlier ones for
        # the same id) drained by dispatch_events().
        self._resources_to_push = {}
        self._worker_pool = eventlet.GreenPool()
        for event in (events.AFTER_CREATE, events.AFTER_UPDATE,
                      events.AFTER_DELETE):
            registry.subscribe(self.handle_event, resource, event)
    def wait(self):
        """Waits for all outstanding events to be dispatched."""
        self._worker_pool.waitall()
    @staticmethod
    def _is_session_semantic_violated(context, resource, event):
        """Return True and print an ugly error on transaction violation.
        This code is to print ugly errors when AFTER_CREATE/UPDATE
        event transaction semantics are violated by other parts of
        the code.
        """
        # An active session here means the caller fired an AFTER_* event
        # from inside an open transaction, which is a contract violation.
        if not context.session.is_active:
            return False
        stack = traceback.extract_stack()
        stack = "".join(traceback.format_list(stack))
        LOG.error(_LE("This handler is supposed to handle AFTER "
                      "events, as in 'AFTER it's committed', "
                      "not BEFORE. Offending resource event: "
                      "%(r)s, %(e)s. Location:\n%(l)s"),
                  {'r': resource, 'e': event, 'l': stack})
        return True
    def handle_event(self, resource, event, trigger,
                     context, *args, **kwargs):
        """Callback handler for resource change that pushes change to RPC.
        We always retrieve the latest state and ignore what was in the
        payload to ensure that we don't get any stale data.
        """
        if self._is_session_semantic_violated(context, resource, event):
            return
        resource_id = self._extract_resource_id(kwargs)
        # we preserve the context so we can trace a receive on the agent back
        # to the server-side event that triggered it
        self._resources_to_push[resource_id] = context.to_dict()
        # spawn worker so we don't block main AFTER_UPDATE thread
        self._worker_pool.spawn(self.dispatch_events)
    @lockutils.synchronized('event-dispatch')
    def dispatch_events(self):
        # this is guarded by a lock to ensure we don't get too many concurrent
        # dispatchers hitting the database simultaneously.
        to_dispatch, self._resources_to_push = self._resources_to_push, {}
        # TODO(kevinbenton): now that we are batching these, convert to a
        # single get_objects call for all of them
        for resource_id, context_dict in to_dispatch.items():
            context = n_ctx.Context.from_dict(context_dict)
            # attempt to get regardless of event type so concurrent delete
            # after create/update is the same code-path as a delete event
            with db_api.context_manager.independent.reader.using(context):
                obj = self._obj_class.get_object(context, id=resource_id)
            # CREATE events are always treated as UPDATE events to ensure
            # listeners are written to handle out-of-order messages
            if obj is None:
                rpc_event = rpc_events.DELETED
                # construct a fake object with the right ID so we can
                # have a payload for the delete message.
                obj = self._obj_class(id=resource_id)
            else:
                rpc_event = rpc_events.UPDATED
            LOG.debug("Dispatching RPC callback event %s for %s %s.",
                      rpc_event, self._resource, resource_id)
            self._resource_push_api.push(context, [obj], rpc_event)
    def _extract_resource_id(self, callback_kwargs):
        # Callbacks pass either '<resource>_id' or the full '<resource>'
        # dict; accept both forms.
        id_kwarg = '%s_id' % self._resource
        if id_kwarg in callback_kwargs:
            return callback_kwargs[id_kwarg]
        if self._resource in callback_kwargs:
            return callback_kwargs[self._resource]['id']
        raise RuntimeError("Couldn't find resource ID in callback event")
class OVOServerRpcInterface(object):
    """ML2 server-side RPC interface.
    Emits RPC callback notifications whenever tracked ML2 objects change.
    """
    def __init__(self):
        self._rpc_pusher = resources_rpc.ResourcesPushRpcApi()
        self._setup_change_handlers()
        LOG.debug("ML2 OVO RPC backend initialized.")
    def _setup_change_handlers(self):
        """Register a local change handler for every tracked resource."""
        tracked = (
            (resources.PORT, ports.Port),
            (resources.SUBNET, subnet.Subnet),
            (resources.NETWORK, network.Network),
            (resources.SECURITY_GROUP, securitygroup.SecurityGroup),
            (resources.SECURITY_GROUP_RULE, securitygroup.SecurityGroupRule),
        )
        self._resource_handlers = {}
        for res, obj_class in tracked:
            self._resource_handlers[res] = _ObjectChangeHandler(
                res, obj_class, self._rpc_pusher)
    def wait(self):
        """Block until every handler has drained its pending events."""
        for handler in self._resource_handlers.values():
            handler.wait()
| [
"jcdiaztorres96@gmail.com"
] | jcdiaztorres96@gmail.com |
04054e64b66cefba84ca094869841104b29f8fdb | 14d8adc86adc14c1d64a5550b1bbd5663e984545 | /链条/reverse_linked_list.py | 2a4d84cb454b930a26ef0694c55d5098cf8338e6 | [] | no_license | milllu/leetcode | e1b68ef7774cc0c1b49325ec1b87280d27570d94 | 458b3e72cd82a203b10bdca747c4c3ba85708f75 | refs/heads/master | 2020-03-30T23:41:46.180308 | 2018-10-11T01:08:31 | 2018-10-11T01:08:31 | 151,709,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | """
反转一个单链表。
示例:
输入: 1->2->3->4->5->NULL
输出: 5->4->3->2->1->NULL
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseList(self, head):
        """Iteratively reverse a singly linked list; returns the new head."""
        prev = None
        node = head
        while node:
            nxt = node.next
            node.next = prev
            prev = node
            node = nxt
        return prev
    def reverseList2(self, head):
        """Recursively reverse a singly linked list (accumulator style)."""
        def _reverse(node, acc):
            if not node:
                return acc
            rest = node.next
            node.next = acc
            return _reverse(rest, node)
        return _reverse(head, None)
| [
"3351440959@qq.com"
] | 3351440959@qq.com |
cb474534c6555535ead6b6ef459893799675e547 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /python/ray/tune/tests/test_trial_executor_inheritance.py | 0ced35aff17c1af561f0108d7e295cdf648f720c | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 1,700 | py | import unittest
class TestTrialExecutorInheritance(unittest.TestCase):
    """Checks the TrialExecutor deprecation policy: subclassing the base
    class directly must raise at class-definition time, while inheriting
    through RayTrialExecutor must remain allowed."""
    def test_direct_inheritance_not_ok(self):
        from ray.tune.trial_executor import TrialExecutor
        msg = (
            "_MyTrialExecutor inherits from TrialExecutor, which is being "
            "deprecated. "
            "RFC: https://github.com/ray-project/ray/issues/17593. "
            "Please reach out on the Ray Github if you have any concerns."
        )
        # The warning fires when the class body is evaluated, so the whole
        # class definition sits inside the assertRaisesRegex block.
        with self.assertRaisesRegex(DeprecationWarning, msg):
            class _MyTrialExecutor(TrialExecutor):
                def __init__(self):
                    pass
                def start_trial(self, trial):
                    return True
                def stop_trial(self, trial):
                    pass
                def restore(self, trial):
                    pass
                def save(self, trial):
                    return None
                def reset_trial(self, trial, new_config, new_experiment_tag):
                    return False
                def debug_string(self):
                    return "This is a debug string."
                def export_trial_if_needed(self):
                    return {}
                def fetch_result(self):
                    return []
                def get_next_available_trial(self):
                    return None
                def get_running_trials(self):
                    return []
    def test_indirect_inheritance_ok(self):
        from ray.tune.ray_trial_executor import RayTrialExecutor
        # Subclassing the concrete executor (and its subclasses) must not
        # trigger the deprecation warning.
        class _MyRayTrialExecutor(RayTrialExecutor):
            pass
        class _AnotherMyRayTrialExecutor(_MyRayTrialExecutor):
            pass
| [
"noreply@github.com"
] | pdames.noreply@github.com |
3508427df51b9b799e24ea86dfef9e9c939e0510 | 9d5c9d9373002ab4ed1b493136517e8b4ab160e5 | /saas/backend/apps/role/filters.py | f626398fc2b1aff6f2a1f372c0d99597f01bf2aa | [
"MIT"
] | permissive | robert871126/bk-iam-saas | f8299bb632fc853ef0131d445f84c6084fc84aba | 33c8f4ffe8697081abcfc5771b98a88c0578059f | refs/heads/master | 2023-08-23T19:23:01.987394 | 2021-10-22T09:45:28 | 2021-10-22T09:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django_filters import rest_framework as filters
from backend.apps.role.models import Role, RoleCommonAction
class RatingMangerFilter(filters.FilterSet):
    """Filter set for Role querysets: case-insensitive substring match on name."""

    name = filters.CharFilter(lookup_expr="icontains", label="名称")

    class Meta:
        model = Role
        fields = ["name"]
class RoleCommonActionFilter(filters.FilterSet):
    """Filter set for RoleCommonAction querysets: exact match on system id."""

    system_id = filters.CharFilter(label="系统id")

    class Meta:
        model = RoleCommonAction
        fields = ["system_id"]
| [
"zhu327@gmail.com"
] | zhu327@gmail.com |
1beee7a48a2061f0237ff1299fb0a91d09dcbc80 | c85aede0797e73dd719646a0f7671594b0d4e4e9 | /sbin/coveragerc_manager.py | 9292485b449a1bbfe9fe5d1bd624d95e573d4a61 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mtiid/putil | c0493535ed5ee7694546ee9193cad0a764c440fc | a99c84ee781aa9eb6e45272f95b82ac35648ba4b | refs/heads/master | 2021-01-18T09:05:50.437577 | 2016-01-20T16:01:12 | 2016-01-20T16:01:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,345 | py | #!/usr/bin/env python
# coveragerc_manager.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111
# Standard library imports
from __future__ import print_function
import os
import sys
###
# Global variables
###
# Putil sub-packages that carry their own source trees for coverage tracking.
SUBMODULES_LIST = ['plot', 'pcsv']
###
# Functions
###
def _write(fobj, data):
    """Write *data* to the already-open file object *fobj* (simple wrapper)."""
    fobj.write(data)
def get_source_files(sdir):
    """List the ``.py`` files in *sdir* that belong to the current interpreter.

    ``__init__.py``, ``conftest.py`` and the compat shim for the *other*
    major Python version are excluded.
    """
    other_major = 2 if sys.hexversion >= 0x03000000 else 3
    skip_suffixes = ('conftest.py', 'compat{0}.py'.format(other_major))
    selected = []
    for fname in os.listdir(sdir):
        if not fname.endswith('.py') or fname == '__init__.py':
            continue
        if any(fname.endswith(suffix) for suffix in skip_suffixes):
            continue
        selected.append(fname)
    return selected
def main(argv):
    """Create or remove the per-environment ``.coveragerc`` helper files.

    ``argv`` layout is ``[env, mode_flag, interp, ...]`` where *env* is one
    of ``'tox'``, ``'ci'`` or ``'local'``; *mode_flag* is ``'1'`` to generate
    the ``.coveragerc``/``conftest.py`` files and anything else to delete
    them; the remaining items carry the interpreter name, the site-packages
    directory and optionally a single (sub)module name.
    """
    # pylint: disable=R0912,R0914,R0915
    debug = True
    env = argv[0].strip('"').strip("'")
    # Unpack command line arguments
    print('Coverage manager')
    print('Arguments received: {0}'.format(argv))
    if env == 'tox':
        print('Tox mode')
        if len(argv[1:]) == 4:
            mode_flag, interp, _, site_pkg_dir, submodules, module = (
                argv[1:]+[SUBMODULES_LIST, '']
            )
            print('   mode_flag: {0}'.format(mode_flag))
            print('   interp: {0}'.format(interp))
            print('   site_pkg_dir: {0}'.format(site_pkg_dir))
            print('   submodules: {0}'.format(submodules))
            print('   module: {0}'.format(module))
        else:
            # NOTE(review): this branch never sets site_pkg_dir/submodules,
            # so the code below would raise NameError -- confirm the
            # expected argv shapes for tox mode.
            mode_flag, interp, _, module = argv[1:]+['']
            print('   mode_flag: {0}'.format(mode_flag))
            print('   interp: {0}'.format(interp))
            print('   module: {0}'.format(module))
    elif env == 'ci':
        print('Continuous integration mode')
        mode_flag, interp, _, site_pkg_dir, submodules, module = (
            argv[1],
            argv[2],
            os.environ['REPO_DIR'],
            argv[3],
            SUBMODULES_LIST,
            ''
        )
        print('   mode_flag: {0}'.format(mode_flag))
        print('   interp: {0}'.format(interp))
        print('   site_pkg_dir: {0}'.format(site_pkg_dir))
        print('   submodules: {0}'.format(submodules))
        print('   module: {0}'.format(module))
    elif env == 'local':
        print('Local mode')
        if len(argv[1:]) == 4:
            mode_flag, interp, _, site_pkg_dir, submodules, module = (
                argv[1],
                argv[2],
                argv[3],
                argv[3],
                [argv[4]],
                argv[4]
            )
        else:
            mode_flag, interp, _, site_pkg_dir, submodules, module = (
                argv[1],
                argv[2],
                argv[3],
                argv[3],
                [''],
                ''
            )
        print('   mode_flag: {0}'.format(mode_flag))
        print('   interp: {0}'.format(interp))
        print('   site_pkg_dir: {0}'.format(site_pkg_dir))
        print('   submodules: {0}'.format(submodules))
        print('   module: {0}'.format(module))
    # Generate .coveragerc file
    is_submodule = module in SUBMODULES_LIST
    source_dir = os.path.join(site_pkg_dir, 'putil')
    output_file_name = os.path.join(
        site_pkg_dir,
        'putil',
        '.coveragerc_{0}_{1}'.format(env, interp)
    )
    coverage_file_name = os.path.join(
        site_pkg_dir, 'putil', '.coverage_{0}'.format(interp)
    )
    conf_file = []
    conf_file.append(os.path.join(source_dir, 'conftest.py'))
    conf_file.append(os.path.join(source_dir, 'plot', 'conftest.py'))
    if mode_flag == '1':
        # Build the .coveragerc contents line by line.
        lines = []
        lines.append(
            '# .coveragerc_{0} to control coverage.py during {1} runs'.format(
                env,
                env.capitalize()
            )
        )
        lines.append('[report]')
        lines.append('show_missing = True')
        lines.append('[run]')
        lines.append('branch = True')
        lines.append('data_file = {0}'.format(coverage_file_name))
        start_flag = True
        # Include modules (in local mode, only the single requested module).
        source_files = get_source_files(os.path.join(site_pkg_dir, 'putil'))
        for file_name in [
                item
                for item in source_files
                if (env != 'local') or ((env == 'local') and
                (not is_submodule) and (item == '{0}.py'.format(module)))]:
            if file_name.endswith('version.py'):
                continue
            # First entry carries the 'include = ' key, the rest line up
            # under it with a 10-space indent.
            start_flag, prefix = (
                (False, 'include = ')
                if start_flag else
                (False, 10*' ')
            )
            lines.append(
                '{0}{1}'.format(prefix, os.path.join(
                    site_pkg_dir,
                    'putil',
                    file_name
                )))
        # Include sub-modules
        if (env != 'local') or ((env == 'local') and is_submodule):
            for submodule in submodules:
                for file_name in [
                        item
                        for item in get_source_files(os.path.join(
                            site_pkg_dir,
                            'putil',
                            submodule))]:
                    start_flag, prefix = (
                        (False, 'include = ')
                        if start_flag else
                        (False, 10*' ')
                    )
                    lines.append('{0}{1}'.format(prefix, os.path.join(
                        site_pkg_dir,
                        'putil',
                        submodule,
                        file_name
                    )))
        # Generate XML reports for continuous integration
        if env == 'ci':
            lines.append('[xml]')
            lines.append('output = {0}'.format(os.path.join(
                os.environ['RESULTS_DIR'],
                'codecoverage',
                'coverage.xml'
            )))
        # Write file
        with open(output_file_name, 'w') as fobj:
            _write(fobj, '\n'.join(lines))
        # Echo file
        if debug:
            print('File: {0}'.format(output_file_name))
            with open(output_file_name, 'r') as fobj:
                print(''.join(fobj.readlines()))
        # Generate conftest.py files to selectively
        # skip Python 2 or Python 3 files
        skip_file = (
            "# pylint: disable=E0012,C0103,C0111,C0411\n"
            "import sys\n"
            "import matplotlib\n"
            "matplotlib.rcParams['backend'] = 'Agg'\n"
            "collect_ignore = []\n"
            "if sys.hexversion < 0x03000000:\n"
            "    collect_ignore.append('compat3.py')\n"
            "else:\n"
            "    collect_ignore.append('compat2.py')\n"
        )
        with open(conf_file[0], 'w') as fobj:
            _write(fobj, skip_file)
    else:
        # Clean-up mode: remove every generated file, best-effort.
        del_files = conf_file
        del_files.append(output_file_name)
        del_files.append(coverage_file_name)
        try:
            for fname in del_files:
                print('Deleting file {0}'.format(fname))
                os.remove(fname)
        except OSError:
            # Missing files are fine; note the first failure aborts
            # deletion of the remaining files (pre-existing behavior).
            pass
# Script entry point: forward the command-line arguments (sans program name).
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
696ddcda78f4d5e20f42851f7bec6f166409b249 | c127946e261f7a1739f998bab5126825bb3c1399 | /osext/test/__main__.py | 6fa395430c40f7e4a7a30d3674dedfcae7be7c5b | [
"MIT"
] | permissive | Tatsh/osext | a9763aa87d357f90169a2595caff01616558d066 | d375990eee1b66fd2cd7bdde0d9313e2340eee3c | refs/heads/master | 2020-03-30T13:11:08.709090 | 2015-01-09T20:32:16 | 2015-01-09T20:32:16 | 28,982,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from osext.test import argparse_actions_test, pushdtest
import unittest
# Load and run each test submodule in order with a verbose text runner.
for test_module in (pushdtest, argparse_actions_test):
    module_suite = unittest.TestLoader().loadTestsFromModule(test_module)
    unittest.TextTestRunner(verbosity=2).run(module_suite)
| [
"audvare@gmail.com"
] | audvare@gmail.com |
08a264c9708f0dee374d65a90f0a2fc828e0b770 | 8afe87c4e26e08b1dc24090a39fbedd7fa84210a | /sdnmpi/topology.py | 61ce97e6272298ba52328eaa2955b933ae0546c1 | [] | no_license | keichi/sdn-mpi-router | ca1cc128fa4fff11b61851d34fae0b21ed4b65ab | b20be81d39363cc28a9a0a5826a4450f9946d9d9 | refs/heads/master | 2023-01-10T00:29:03.959435 | 2016-02-09T01:03:07 | 2016-02-09T01:03:07 | 56,743,429 | 1 | 0 | null | 2022-12-26T20:03:41 | 2016-04-21T04:50:56 | Python | UTF-8 | Python | false | false | 7,216 | py | from ryu.base import app_manager
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.controller.event import EventRequestBase, EventReplyBase
from ryu.topology import event, switches
from ryu.controller import ofp_event
from ryu.lib.mac import haddr_to_bin, BROADCAST_STR, BROADCAST
from ryu.lib.packet import packet, ethernet, udp
from util.topology_db import TopologyDB
class CurrentTopologyRequest(EventRequestBase):
    """Ryu event asking TopologyManager for its current topology database."""

    def __init__(self):
        super(CurrentTopologyRequest, self).__init__()
        # Route this request to the TopologyManager app instance.
        self.dst = "TopologyManager"
class CurrentTopologyReply(EventReplyBase):
    """Reply carrying the TopologyDB snapshot back to the requester."""

    def __init__(self, dst, topology):
        super(CurrentTopologyReply, self).__init__(dst)
        self.topology = topology
class FindRouteRequest(EventRequestBase):
    """Ask TopologyManager for one route between two MAC addresses."""

    def __init__(self, src_mac, dst_mac):
        super(FindRouteRequest, self).__init__()
        # Route this request to the TopologyManager app instance.
        self.dst = "TopologyManager"
        self.src_mac = src_mac
        self.dst_mac = dst_mac
class FindRouteReply(EventReplyBase):
    """Reply carrying the forwarding database (fdb) of the found route."""

    def __init__(self, dst, fdb):
        super(FindRouteReply, self).__init__(dst)
        self.fdb = fdb
class FindAllRoutesRequest(EventRequestBase):
    """Ask TopologyManager for every route between two MAC addresses."""

    def __init__(self, src_mac, dst_mac):
        super(FindAllRoutesRequest, self).__init__()
        # Route this request to the TopologyManager app instance.
        self.dst = "TopologyManager"
        self.src_mac = src_mac
        self.dst_mac = dst_mac
class FindAllRoutesReply(EventReplyBase):
    """Reply carrying the forwarding databases of every route found."""

    def __init__(self, dst, fdb):
        super(FindAllRoutesReply, self).__init__(dst)
        # BUG FIX: the original assigned `self.fdbs = fdbs`, referencing an
        # undefined name (the parameter is `fdb`) and raising NameError on
        # every construction.
        self.fdbs = fdb
class BroadcastRequest(EventRequestBase):
    """Ask TopologyManager to flood *data* out of every edge port.

    The ingress port on the originating switch (src_dpid/src_in_port)
    is excluded from the flood.
    """

    def __init__(self, data, src_dpid, src_in_port):
        super(BroadcastRequest, self).__init__()
        # Route this request to the TopologyManager app instance.
        self.dst = "TopologyManager"
        self.data = data
        self.src_dpid = src_dpid
        self.src_in_port = src_in_port
class TopologyManager(app_manager.RyuApp):
    """Ryu app that tracks network topology and answers route/broadcast events.

    It mirrors switch/link/host discovery events into a TopologyDB and
    serves CurrentTopology/FindRoute/FindAllRoutes/Broadcast requests
    raised by sibling applications.
    """

    _CONTEXTS = {
        "switches": switches.Switches,
    }
    # Events this app may raise toward other apps.
    # NOTE(review): the FindRoute*/FindAllRoutes* events are handled but not
    # listed here -- confirm whether they should be registered as well.
    _EVENTS = [CurrentTopologyRequest, BroadcastRequest]

    def __init__(self, *args, **kwargs):
        super(TopologyManager, self).__init__(*args, **kwargs)
        self.topologydb = TopologyDB()

    def _add_flow(self, datapath, in_port, dst, actions):
        """Install a flow matching (in_port, dl_dst=dst) with *actions*."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(
            in_port=in_port, dl_dst=haddr_to_bin(dst))
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    def _install_multicast_drop(self, datapath, dst):
        """Install a top-priority flow that drops all packets sent to *dst*."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(dl_dst=haddr_to_bin(dst))
        # Install a flow to drop all packets sent to dst
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0xffff, actions=[])
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPStateChange, MAIN_DISPATCHER)
    def _state_change_handler(self, ev):
        """On switch connect, steer all broadcast frames to the controller."""
        datapath = ev.datapath
        ofproto = datapath.ofproto
        ofproto_parser = datapath.ofproto_parser
        match = ofproto_parser.OFPMatch(dl_dst=BROADCAST)
        actions = [ofproto_parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
        # Install a flow to send all broadcast packets to the controller
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0xfffe, actions=actions)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Flood controller-bound broadcast frames across the topology."""
        msg = ev.msg
        datapath = msg.datapath
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)
        dst = eth.dst
        # Do not handle IPv6 multicast packets
        if dst.startswith("33:33"):
            self._install_multicast_drop(datapath, dst)
            return
        # Do not handle unicast packets
        elif dst != BROADCAST_STR:
            return
        # Do not handle announcement packets (UDP port 61000)
        udph = pkt.get_protocol(udp.udp)
        if udph and udph.dst_port == 61000:
            return
        self._do_broadcast(msg.data, datapath.id, msg.in_port)

    @set_ev_cls(CurrentTopologyRequest)
    def _current_topology_request_handler(self, req):
        """Answer a topology-snapshot request with the live TopologyDB."""
        reply = CurrentTopologyReply(req.src, self.topologydb)
        self.reply_to_request(req, reply)

    @set_ev_cls(FindRouteRequest)
    def _find_route_request_handler(self, req):
        """Answer a single-route request with the computed fdb."""
        fdb = self.topologydb.find_route(req.src_mac, req.dst_mac)
        reply = FindRouteReply(req.src, fdb)
        self.reply_to_request(req, reply)

    @set_ev_cls(FindAllRoutesRequest)
    def _find_all_routes_request_handler(self, req):
        """Answer an all-routes request with every computed fdb."""
        fdbs = self.topologydb.find_route(req.src_mac, req.dst_mac, True)
        # BUG FIX: the original constructed FindAllRoutesRequest here,
        # replying with a *request* object instead of a reply.
        reply = FindAllRoutesReply(req.src, fdbs)
        self.reply_to_request(req, reply)

    def _is_edge_port(self, port):
        """Return True when *port* is not part of any switch-to-switch link."""
        for dpid_to_link in self.topologydb.links.values():
            for link in dpid_to_link.values():
                if port == link.src or port == link.dst:
                    return False
        return True

    def _do_broadcast(self, data, dpid, in_port):
        """Send *data* out of every usable edge port on every switch."""
        for switch in self.topologydb.switches.values():
            datapath = switch.dp
            ofproto = datapath.ofproto
            ofproto_parser = datapath.ofproto_parser
            # Only broadcast to non-reserved switch-to-host ports
            ports = [p for p in switch.ports if self._is_edge_port(p)
                     and not p.is_reserved()]
            # Exclude ingress port on the switch that originated the frame
            if switch.dp.id == dpid:
                ports = [p for p in ports if p.port_no != in_port]
            actions = [ofproto_parser.OFPActionOutput(port.port_no)
                       for port in ports]
            out = ofproto_parser.OFPPacketOut(
                datapath=datapath, in_port=ofproto.OFPP_NONE,
                buffer_id=ofproto.OFP_NO_BUFFER, actions=actions,
                data=data)
            datapath.send_msg(out)

    @set_ev_cls(BroadcastRequest)
    def _broadcast_request_handler(self, req):
        """Perform a requested broadcast, then acknowledge the requester."""
        self._do_broadcast(req.data, req.src_dpid, req.src_in_port)
        self.reply_to_request(req, EventReplyBase(req.src))

    # --- topology discovery events mirrored into the TopologyDB ---

    @set_ev_cls(event.EventSwitchEnter)
    def _event_switch_enter_handler(self, ev):
        self.topologydb.add_switch(ev.switch)

    @set_ev_cls(event.EventSwitchLeave)
    def _event_switch_leave_handler(self, ev):
        self.topologydb.delete_switch(ev.switch)

    @set_ev_cls(event.EventLinkAdd)
    def _event_link_add_handler(self, ev):
        self.topologydb.add_link(ev.link)

    @set_ev_cls(event.EventLinkDelete)
    def _event_link_delete_handler(self, ev):
        self.topologydb.delete_link(ev.link)

    @set_ev_cls(event.EventHostAdd)
    def _event_host_add_handler(self, ev):
        self.topologydb.add_host(ev.host)
| [
"keichi.t@me.com"
] | keichi.t@me.com |
609168d13020a0c809176ddcb3d7c7dc19e27ab8 | 5c6a8cd15955f7ca5f822b17b56c37c36ca4144d | /networks/cnn_pathnet.py | b180e502fad140c272e3f43dd85b7daf79977d15 | [] | no_license | xavoliva/CAT | 57e48eb958d10f17071797645f4836ed33ae74a7 | 5f32ada1eed4bf4de4488840bd3ae7163e9dd22b | refs/heads/main | 2023-01-22T16:06:40.200292 | 2020-12-08T17:38:30 | 2020-12-08T17:38:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,851 | py | import sys
import torch
import numpy as np
import utils
class Net(torch.nn.Module):
    """PathNet-style CNN: M candidate modules per layer, N active per task.

    Each of the L=5 layers holds M interchangeable sub-modules; a genotype
    path matrix of shape (L, N) selects which N modules are summed at every
    layer. The best path found for each task is remembered in ``bestPath``
    and its modules are frozen before the next task trains.
    """

    def __init__(self,inputsize,taskcla,nhid,args=0):
        super(Net,self).__init__()
        ncha,size,_=inputsize
        self.taskcla=taskcla
        self.ntasks = len(self.taskcla)
        """
        # Config of Sec 2.5 in the paper
        expand_factor = 0.231 # to match num params
        self.N = 5
        self.M = 20 # Large M numbers like this, given our architecture, produce no training
        #"""
        """
        # Config of Sec 2.4 in the paper
        expand_factor = 0.325 # match num params
        self.N = 3
        self.M = 10
        #"""
        #"""
        # Better config found by us
        expand_factor = 0.258 # match num params
        self.N = 3
        self.M = 16
        #"""
        self.L = 5 # our architecture has 5 layers
        # FIX: use the builtin `int` dtype -- the `np.int` alias was removed
        # in NumPy 1.24 and was always equivalent to `int`.
        self.bestPath = -1 * np.ones((self.ntasks,self.L,self.N),dtype=int) #we need to remember this between the tasks
        #init modules subnets (one ModuleList of M candidates per layer)
        self.conv1=torch.nn.ModuleList()
        self.sizec1 = int(expand_factor*64)
        self.conv2=torch.nn.ModuleList()
        self.sizec2 = int(expand_factor*128)
        self.conv3=torch.nn.ModuleList()
        self.sizec3 = int(expand_factor*256)
        self.fc1=torch.nn.ModuleList()
        self.sizefc1 = int(expand_factor*nhid)
        self.fc2=torch.nn.ModuleList()
        self.sizefc2 = int(expand_factor*nhid)
        self.last=torch.nn.ModuleList()
        self.maxpool=torch.nn.MaxPool2d(2)
        self.relu=torch.nn.ReLU()
        pdrop1 = args.pdrop1
        pdrop2 = args.pdrop2
        self.drop1=torch.nn.Dropout(pdrop1)
        self.drop2=torch.nn.Dropout(pdrop2)
        #declare task columns subnets
        for j in range(self.M):
            self.conv1.append(torch.nn.Conv2d(ncha,self.sizec1,kernel_size=size//8))
            s=utils.compute_conv_output_size(size,size//8)
            s=s//2
            self.conv2.append(torch.nn.Conv2d(self.sizec1,self.sizec2,kernel_size=size//10))
            s=utils.compute_conv_output_size(s,size//10)
            s=s//2
            self.conv3.append(torch.nn.Conv2d(self.sizec2,self.sizec3,kernel_size=2))
            s=utils.compute_conv_output_size(s,2)
            s=s//2
            self.fc1.append(torch.nn.Linear(self.sizec3*s*s,self.sizefc1))
            self.fc2.append(torch.nn.Linear(self.sizefc1,self.sizefc2))
        # One output head per task.
        for t,n in self.taskcla:
            self.last.append(torch.nn.Linear(self.sizefc2,n))
        print('CNN PathNet')
        print('pdrop1: ',pdrop1)
        print('pdrop2: ',pdrop2)
        return

    def forward(self,x,t,P=None):
        """Run *x* through the N modules selected per layer by path *P*.

        Falls back to the stored best path for task *t* when *P* is None;
        returns one output tensor per task head.
        """
        if P is None:
            P = self.bestPath[t]
        # P is the genotype path matrix shaped LxN(no.layers x no.permitted modules)
        h=self.maxpool(self.drop1(self.relu(self.conv1[P[0,0]](x))))
        for j in range(1,self.N):
            h = h + self.maxpool(self.drop1(self.relu(self.conv1[P[0,j]](x)))) #sum activations
        h_pre=self.maxpool(self.drop1(self.relu(self.conv2[P[1,0]](h))))
        for j in range(1,self.N):
            h_pre = h_pre + self.maxpool(self.drop1(self.relu(self.conv2[P[1,j]](h)))) #sum activations
        h = h_pre
        h_pre=self.maxpool(self.drop2(self.relu(self.conv3[P[2,0]](h))))
        for j in range(1,self.N):
            h_pre = h_pre + self.maxpool(self.drop2(self.relu(self.conv3[P[2,j]](h)))) #sum activations
        h=h_pre.view(x.size(0),-1)
        h_pre=self.drop2(self.relu(self.fc1[P[3,0]](h)))
        for j in range(1,self.N):
            h_pre = h_pre + self.drop2(self.relu(self.fc1[P[3,j]](h))) #sum activations
        h = h_pre
        h_pre=self.drop2(self.relu(self.fc2[P[4,0]](h)))
        for j in range(1,self.N):
            h_pre = h_pre + self.drop2(self.relu(self.fc2[P[4,j]](h))) #sum activations
        h = h_pre
        y=[]
        for t,i in self.taskcla:
            y.append(self.last[t](h))
        return y

    def unfreeze_path(self,t,Path):
        """Unfreeze modules on *Path* for task *t*; freeze everything else.

        Modules belonging to the best paths of earlier tasks stay frozen.
        """
        #freeze modules not in path P and the ones in bestPath paths for the previous tasks
        for i in range(self.M):
            self.unfreeze_module(self.conv1,i,Path[0,:],self.bestPath[0:t,0,:])
            self.unfreeze_module(self.conv2,i,Path[1,:],self.bestPath[0:t,1,:])
            self.unfreeze_module(self.conv3,i,Path[2,:],self.bestPath[0:t,2,:])
            self.unfreeze_module(self.fc1,i,Path[3,:],self.bestPath[0:t,3,:])
            self.unfreeze_module(self.fc2,i,Path[4,:],self.bestPath[0:t,4,:])
        return

    def unfreeze_module(self,layer,i,Path,bestPath):
        """Enable gradients for layer[i] iff it is on *Path* and not frozen
        by a previous task's best path."""
        if (i in Path) and (i not in bestPath): #if the current module is in the path and not in the bestPath
            utils.set_req_grad(layer[i],True)
        else:
            utils.set_req_grad(layer[i],False)
        return
| [
"15011700342Xuan"
] | 15011700342Xuan |
f8eb3d68f2d770a036a28684ef69c41aea31c054 | cd876d32aa66112892dc9550837ad843e3e03afd | /env_carzone/Lib/site-packages/django/core/management/commands/createcachetable.py | a12ceb3830b2b8047936d89d1ddde2574dd92d98 | [
"BSD-3-Clause"
] | permissive | viplavdube/Car-Yard-App | 7665b7e6e54f3b0e4a4da563151f85d65c225cef | 65381a50f828e80f31d25d4f35e497f51c2d224d | refs/heads/master | 2023-04-19T03:49:18.991604 | 2021-04-27T17:51:10 | 2021-04-27T17:51:10 | 349,094,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,591 | py | from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS,
connections,
models,
router,
transaction,
)
from django.db.utils import DatabaseError
class Command(BaseCommand):
    """Management command that creates the database cache backend's tables."""

    help = "Creates the tables needed to use the SQL cache backend."

    requires_system_checks = False

    def add_arguments(self, parser):
        """Register optional table names and the --database/--dry-run flags."""
        parser.add_argument(
            "args",
            metavar="table_name",
            nargs="*",
            help="Optional table names. Otherwise, settings.CACHES is used to find cache tables.",
        )
        parser.add_argument(
            "--database",
            default=DEFAULT_DB_ALIAS,
            help="Nominates a database onto which the cache tables will be "
            'installed. Defaults to the "default" database.',
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="Does not create the table, just prints the SQL that would be run.",
        )

    def handle(self, *tablenames, **options):
        """Create each named table, or every db-backed cache from settings."""
        db = options["database"]
        self.verbosity = options["verbosity"]
        dry_run = options["dry_run"]
        if tablenames:
            # Legacy behavior, tablename specified as argument
            for tablename in tablenames:
                self.create_table(db, tablename, dry_run)
        else:
            # Discover cache tables from every database-backed cache alias.
            for cache_alias in settings.CACHES:
                cache = caches[cache_alias]
                if isinstance(cache, BaseDatabaseCache):
                    self.create_table(db, cache._table, dry_run)

    def create_table(self, database, tablename, dry_run):
        """Build and execute the CREATE TABLE (and index) SQL for one table.

        Skips tables that migrations don't allow on *database* or that
        already exist; with *dry_run* the SQL is printed instead of run.
        """
        cache = BaseDatabaseCache(tablename, {})
        if not router.allow_migrate_model(database, cache.cache_model_class):
            return
        connection = connections[database]

        if tablename in connection.introspection.table_names():
            if self.verbosity > 0:
                self.stdout.write("Cache table '%s' already exists." % tablename)
            return

        fields = (
            # "key" is a reserved word in MySQL, so use "cache_key" instead.
            models.CharField(
                name="cache_key", max_length=255, unique=True, primary_key=True
            ),
            models.TextField(name="value"),
            models.DateTimeField(name="expires", db_index=True),
        )
        table_output = []
        index_output = []
        qn = connection.ops.quote_name
        # Render each field as a column definition; indexes are collected
        # separately and executed after the table exists.
        for f in fields:
            field_output = [
                qn(f.name),
                f.db_type(connection=connection),
                "%sNULL" % ("NOT " if not f.null else ""),
            ]
            if f.primary_key:
                field_output.append("PRIMARY KEY")
            elif f.unique:
                field_output.append("UNIQUE")
            if f.db_index:
                unique = "UNIQUE " if f.unique else ""
                index_output.append(
                    "CREATE %sINDEX %s ON %s (%s);"
                    % (
                        unique,
                        qn("%s_%s" % (tablename, f.name)),
                        qn(tablename),
                        qn(f.name),
                    )
                )
            table_output.append(" ".join(field_output))
        full_statement = ["CREATE TABLE %s (" % qn(tablename)]
        for i, line in enumerate(table_output):
            full_statement.append(
                "    %s%s" % (line, "," if i < len(table_output) - 1 else "")
            )
        full_statement.append(");")
        full_statement = "\n".join(full_statement)

        if dry_run:
            self.stdout.write(full_statement)
            for statement in index_output:
                self.stdout.write(statement)
            return

        # Run DDL atomically where the backend supports rolling it back.
        with transaction.atomic(
            using=database, savepoint=connection.features.can_rollback_ddl
        ):
            with connection.cursor() as curs:
                try:
                    curs.execute(full_statement)
                except DatabaseError as e:
                    raise CommandError(
                        "Cache table '%s' could not be created.\nThe error was: %s."
                        % (tablename, e)
                    )
                for statement in index_output:
                    curs.execute(statement)
        if self.verbosity > 1:
            self.stdout.write("Cache table '%s' created." % tablename)
| [
"viplav45@gmail.com"
] | viplav45@gmail.com |
574e2bed1cd21db75ad93f26f6a4d3ef13c1fe29 | e04dbc32247accf073e3089ed4013427ad182c7c | /ABC170/ABC170E.py | 1c214969afcd03042c9fd6af3e054cba88882ed0 | [] | no_license | twobooks/atcoder_training | 9deb237aed7d9de573c1134a858e96243fb73ca0 | aa81799ec87cc9c9d76de85c55e99ad5fa7676b5 | refs/heads/master | 2021-10-28T06:33:19.459975 | 2021-10-20T14:16:57 | 2021-10-20T14:16:57 | 233,233,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # from math import factorial,sqrt,ceil #,gcd
# from itertools import permutations,combinations,combinations_with_replacement
# from collections import deque,Counter
# from bisect import bisect_left
# from heapq import heappush,heappop
# from numba import njit
# from functools import lru_cache # 簡単メモ化 @lru_cache(maxsize=1000)
# from fractions import gcd
# from decimal import Decimal, getcontext
# # getcontext().prec = 1000
# # eps = Decimal(10) ** (-100)
# import numpy as np # numpy.lcm()
# from scipy.sparse.csgraph import shortest_path, dijkstra, floyd_warshall, bellman_ford, johnson
# from scipy.sparse import csr_matrix
# from scipy.special import comb,perm #permはnPk
# import networkx as nx
# G = Graph()
# slist = "abcdefghijklmnopqrstuvwxyz"
# Contest template left unfinished: it reads several inputs that overwrite
# each other and prints a variable that is never computed.
MOD = 10**9 + 7  # common contest modulus (never used below)
S = input()
N = int(input())
N,M = map(int,input().split())  # NOTE(review): clobbers the N read above -- template leftover
lisA = list(map(int,input().split()))
# arrA = np.array(input().split(),dtype=np.int64)
print(ans)  # FIXME: `ans` is never defined; running this raises NameError
# for row in board:
# print(*row,sep="") #unpackして間にスペース入れずに出力する
# print("{:.10f}".format(ans))
# print("{:0=10d}".format(ans))
| [
"twobookscom@gmail.com"
] | twobookscom@gmail.com |
9e6391d2d0d23d34224673b134a21aec53a8b1e5 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/atbash-cipher/732cc5e5db4b4586a3bb7cffc064fcb3.py | ba9e3ac694f7d1f30189a304637ff582b21d8087 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 580 | py | from string import (maketrans, translate, ascii_letters, ascii_lowercase,
punctuation)
# Translation table mapping every ASCII letter (either case) onto the
# reversed lowercase alphabet: a<->z, b<->y, ... (Atbash is self-inverse).
atbash_cipher_trans = maketrans(ascii_letters, ascii_lowercase[::-1] * 2)
def encode(msg):
    """Atbash-encode *msg*, grouped into space-separated 5-character blocks.

    Whitespace and punctuation are stripped before translation.
    """
    ciphered = msg.translate(atbash_cipher_trans, " " + punctuation)
    blocks = (ciphered[start:start + 5] for start in range(0, len(ciphered), 5))
    return " ".join(blocks)
def decode(msg):
    """Apply the (self-inverse) Atbash mapping, dropping spaces/punctuation."""
    return msg.translate(atbash_cipher_trans, " " + punctuation)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
3c4e22716c2bd1accf2c11a6210fd7e12e7a2bcc | 64cd09628f599fe18bf38528309349f7ac0df71e | /Introduction/02_Introduction_numpy/10 Numpy functions/expand_dims.py | c146781e6b7b57ef3ce33ab609f0f9f00c00b100 | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import numpy as np
# Demo of np.expand_dims on a 1-D array (Python 2 print syntax).
a = np.array([1,2])
print a.shape # (2,)
# axis=0 inserts a new leading dimension: (2,) -> (1, 2)
b = np.expand_dims(a, axis=0)
print b.shape # (1, 2)
# axis=1 inserts a new trailing dimension: (2,) -> (2, 1)
c = np.expand_dims(a, axis=1)
print c.shape # (2, 1)
"lakino@yonsei.ac.kr"
] | lakino@yonsei.ac.kr |
cb3f6f45cc319404055a4824bd21acb67168c260 | 6def5721d5c7c6a9cde32a19c58ec129d2c7c6b2 | /Week_3/socket.py | ef9afd70ca80bee32b0565b18c6528d74234e23e | [
"MIT"
] | permissive | Sid2697/Python-to-access-web-data | 9d054a7e5c6d2300ec3652f367f85e355a4702c5 | 74b61339cc9060263e3f2d22e9ed90281276ab50 | refs/heads/master | 2021-05-01T17:42:27.894592 | 2018-02-12T11:36:52 | 2018-02-12T11:36:52 | 120,995,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import socket
# Hand-rolled HTTP client: open a TCP connection to the web server on port 80.
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
# HTTP/1.0 GET request; the trailing blank line (\r\n\r\n) ends the headers.
cmd = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)
# Read the response 512 bytes at a time until the server closes the socket.
while True:
    data = mysock.recv(512)
    if (len(data) < 1):
        break
    print(data.decode(),end='')
mysock.close()
| [
"noreply@github.com"
] | Sid2697.noreply@github.com |
d6e26c537e5b3b7cc83493da35a8217f0921a9d8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02995/s716079488.py | cb1f2ffe4fe50d37290c4cfff656f2583b7f8e62 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import math
# Count the integers in [a, b] divisible by neither c nor d, using
# inclusion-exclusion over the multiples of c, d and lcm(c, d).
a, b, c, d = map(int, input().split())
lcm = c * d // math.gcd(c, d)
div_c = b // c - (a - 1) // c          # multiples of c in [a, b]
div_d = b // d - (a - 1) // d          # multiples of d in [a, b]
div_both = b // lcm - (a - 1) // lcm   # multiples of both in [a, b]
print(b - a + 1 - div_c - div_d + div_both)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a8c3c9123e66db1f7cb29f5b7e0b69ccab0c0c28 | b0210f0320e47e3384c43c56e686844081374c6d | /tyk2_input/L31/31-46_wat/run.py | 03974c39662571c19c71e562bc7471b25406fdc3 | [] | no_license | taisung/MSU_Rutgers-GTI | 2531a9346e82131a38dfdef727380f1c100f5def | 3914a07a6be9af6d3d968288b9d4c3049fc10066 | refs/heads/master | 2021-09-14T05:45:38.906495 | 2018-05-08T17:42:16 | 2018-05-08T17:42:16 | 114,943,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import os
# Batch setup for a thermodynamic-integration run: for every lambda window,
# build a fresh working directory from templates and submit a PBS job.
# NOTE(review): `dir` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/tyk2/L31/wat/ti_one-step/31_46/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
# Lambda schedule (one window per value); directory names use %6.5f formatting.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate the per-lambda directory from scratch.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    #equiin: copy the equilibration template and patch in this lambda value
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    #prodin: same for the production template
    prodin = workdir + "%6.5f_prod.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same for the job script
    pbs = workdir + "%6.5f.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #top: shared topology and restart files
    os.system("cp ../31-46_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    #submit pbs
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
f2c37c2304e5871aa5b2d791a0ac8b0d94018f0b | 4ae775089e78aa6362545ae1f3b1beeb9dbba748 | /offset-game/envs/base_env.py | b546f66d8ed51eec258014406aa613eb4ab30903 | [
"MIT"
] | permissive | JosephDistefano/offset-human-interface | 9a3da267becaf1a2c2fafa20fd48684726e97c2c | 88ba05554c289f2c966f663b11930f37fc938a83 | refs/heads/master | 2020-09-14T17:52:53.198045 | 2019-11-19T01:51:42 | 2019-11-19T01:51:42 | 223,205,679 | 0 | 0 | MIT | 2019-11-21T15:31:41 | 2019-11-21T15:31:40 | null | UTF-8 | Python | false | false | 2,448 | py | import math
import numpy as np
import pybullet as p
import pybullet_data
import pybullet_utils.bullet_client as bc
class BaseEnv(object):
    """Bootstraps a pybullet world and spawns the UGV/UAV teams on a grid."""

    def __init__(self, config):
        self.config = config
        # Headless mode talks to the physics server directly; otherwise open
        # the GUI with a top-down debug camera over the arena.
        if config['simulation']['headless']:
            self.p = bc.BulletClient(connection_mode=p.DIRECT)
        else:
            self.p = bc.BulletClient(connection_mode=p.GUI)
            self.p.resetDebugVisualizerCamera(cameraDistance=150,
                                              cameraYaw=0,
                                              cameraPitch=-89.999,
                                              cameraTargetPosition=[0, 80, 0])
        # Standard gravity plus bullet's bundled asset search path.
        self.p.setGravity(0, 0, -9.81)
        self.p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optional
        # Fixed-step integration with a single sub-step per tick.
        self.p.setPhysicsEngineParameter(
            fixedTimeStep=config['simulation']['time_step'], numSubSteps=1)
        # Flat ground plane, rotated a quarter turn and scaled up 20x.
        ground = self.p.loadURDF("plane.urdf", [0, 0, 0],
                                 self.p.getQuaternionFromEuler(
                                     [0, 0, math.pi / 2]),
                                 useFixedBase=True,
                                 globalScaling=20)
        self.p.changeVisualShape(ground, -1)
        return None

    def get_initial_position(self, agent, n_agents):
        """Return the [x, y] grid spawn point for agent index *agent*.

        Agents are laid out on a 5-wide grid with 20-unit spacing.
        """
        layout = np.arange(n_agents).reshape(n_agents // 5, 5)
        row, col = np.where(layout == agent)
        return [row[0] * 20 + 10, col[0] * 20]

    def _initial_setup(self, UGV, UAV):
        """Instantiate n_ugv UGVs and n_uav UAVs at their grid spawn points."""
        self.n_ugv = self.config['simulation']['n_ugv']
        self.n_uav = self.config['simulation']['n_uav']
        # Every vehicle starts with the same orientation.
        init_orientation = self.p.getQuaternionFromEuler([math.pi / 2, 0, 0])
        ugv = []
        for idx in range(self.n_ugv):
            grid_x, grid_y = self.get_initial_position(idx, self.n_ugv)
            spawn = [grid_x * 0.25 + 2.5, grid_y * 0.25, 5]
            ugv.append(UGV(spawn, init_orientation, idx, self.config))
        uav = []
        for idx in range(self.n_uav):
            grid_x, grid_y = self.get_initial_position(idx, self.n_uav)
            # UAVs sit slightly offset from the UGV line.
            spawn = [grid_x * 0.25 + 2.5, grid_y * 0.25 - 1.5, 5]
            uav.append(UAV(spawn, init_orientation, idx, self.config))
        return uav, ugv
| [
"hemanthm2277@gmail.com"
] | hemanthm2277@gmail.com |
d32ed17280ed2172a5ce234c61ad8de6e9779b1b | b0c8bdf5f8045ca5c0322d8e1ca685d3c0d4944a | /download_soundcloud_playlist_to_wav.py | 544d86183ba1a30ba6945c2895977c04a0630028 | [
"MIT"
] | permissive | 255BITS/DCGAN-tensorflow | 428525fbd59f90f4ff54482f43e981ba82b72629 | 684a24da17d1359606d05a36b97c82a33b3fa4da | refs/heads/master | 2020-12-13T23:30:32.728196 | 2016-03-31T06:42:17 | 2016-03-31T06:42:17 | 54,516,766 | 0 | 0 | null | 2016-03-22T23:50:15 | 2016-03-22T23:50:15 | null | UTF-8 | Python | false | false | 537 | py | import os
import sys
import glob
import time
def do(command):
    """Echo *command*, run it through the shell, and print its exit status."""
    banner = "Running " + command
    print(banner)
    status = os.system(command)
    print(status)
# Download every track of a SoundCloud playlist (via scdl) and convert each
# mp3 into a mono, low-bitrate wav under training/ for use as training data.
i = 0
if(len(sys.argv) > 1):
    # Fetch the playlist into training/to_process/ ('-c' continues partial downloads).
    do("cd training/to_process && scdl -c -a -l "+sys.argv[1])
    for file in glob.glob('training/to_process/**/*.mp3'):
        # Unique output name: running index plus timestamp.
        wav_out = 'training/wav'+str(i)+'-'+str(time.time())+'.wav'
        do("ffmpeg -i \""+file+"\" -ac 1 -bufsize 4k -b:v 4k "+wav_out)
        #do("rm \""+file+"\"")
        i+=1
else:
    print("Usage: " + sys.argv[0]+" [link to soundcloud playlist]")
| [
"mikkel@255bits.com"
] | mikkel@255bits.com |
2ffd314ff2cf58180b30587ccf95ac157863664e | 618f7f381ef68cf6b4384ad2a544202f0f4d286e | /Plugins/ChatLikeCMD/ChatLikeCMD.py | bec65332609d49bcedccbb4344171b9b49370934 | [
"MIT"
] | permissive | Lao-Tzu-Taoism/EasierLife | bc7b4bed8cc79f9e348a34d13827b4e53d274ac8 | 1a6bb691f99c6075e92cf1e566529b9074f3edab | refs/heads/master | 2021-08-29T23:27:43.248207 | 2017-12-15T08:26:38 | 2017-12-15T08:26:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,974 | py | #coding=utf8
import thread, time, sys, os, platform
# Platform detection: pick a single-character raw-input function `getch`
# for either POSIX (termios/tty) or Windows (msvcrt).  Python 2 code.
try:
    # POSIX path: termios/tty importable AND functional.
    import termios, tty
    termios.tcgetattr, termios.tcsetattr  # AttributeError here means an unusable stub
    import threading
    OS = 'Linux'
except (ImportError, AttributeError):
    try:
        # Windows path: msvcrt provides unbuffered wide-char input.
        import msvcrt
        OS = 'Windows'
    except ImportError:
        raise Exception('Mac is currently not supported')
        OS = 'Mac'  # unreachable after the raise -- kept from the original
    else:
        getch = msvcrt.getwch
else:
    # POSIX single-character read: switch the tty to raw mode, read one
    # character, and always restore the previous terminal settings.
    def fn():
        try:
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        except:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
            raise Exception
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
    getch = fn
CMD_HISTORY = 30  # maximum number of commands kept in the history buffer
class ChatLikeCMD():
    """Chat-style terminal command prompt.

    Keeps a persistent input line (header + symbol) on screen while a
    background thread prints queued messages (``inPip``) above it; a second
    background thread reads raw keystrokes, supports history browsing and
    basic line editing, and pushes completed commands into ``outPip``.

    NOTE(review): Python 2 code (``print`` statements, ``unicode``,
    ``thread``, ``raw_input``); must run under Python 2.
    """
    def __init__(self, header = 'LittleCoder', symbol = '>', inPip = None, inputMaintain = False):
        self.strBuff = []     # characters of the line currently being typed
        self.cmdBuff = []     # command history buffer
        self.historyCmd = -1  # history index while browsing with UP/DOWN (-1 = not browsing)
        self.cursor = 0       # cursor offset within strBuff
        self.inPip = [] if inPip == None else inPip  # queue of lines to display
        self.outPip = []      # queue of completed commands for the consumer
        self.isLaunch = False # True while the two worker threads should run
        self.isPause = False  # True while suspended by the Ctrl+C prompt
        self.header = header  # prompt prefix shown before every input line
        self.symbol = symbol  # prompt separator, e.g. '>'
        self.inputMaintain = inputMaintain  # keep the typed line on screen after Enter
    def reprint_input(self):
        # Redraw the prompt and whatever has been typed so far.
        sys.stdout.write(self.header + self.symbol)
        if self.strBuff:
            for i in self.strBuff: sys.stdout.write(i)
        sys.stdout.flush()
    def getch(self):
        # Read one raw character, normalising carriage return to newline.
        c = getch()
        return c if c != '\r' else '\n'
    def get_history_command(self, direction):
        # Step through cmdBuff; returns the selected command, '' when
        # stepping DOWN past the newest entry, or None when nothing applies.
        if direction == 'UP':
            if self.historyCmd < CMD_HISTORY - 1 and self.historyCmd < len(self.cmdBuff) - 1: self.historyCmd += 1
        else:
            if self.historyCmd == 0: return ''
            if self.historyCmd > 0: self.historyCmd -= 1
        if -1 < self.historyCmd < len(self.cmdBuff): return self.cmdBuff[self.historyCmd]
    def output_command(self, s):
        # Publish a finished command (decoded to unicode) and record it in history.
        self.outPip.append(s if isinstance(s, unicode) else s.decode(sys.stdin.encoding))
        # NOTE(review): pop() returns the removed *element*, so when the
        # history is full this rebinds cmdBuff to a reversed string rather
        # than a trimmed list -- looks like a bug; the intent was presumably
        # to drop the oldest entry.
        if len(self.cmdBuff) >= CMD_HISTORY: self.cmdBuff = self.cmdBuff[::-1].pop()[::-1]
        self.cmdBuff.append(s)
    def print_thread(self):
        # Worker loop: flush queued messages above the input line.
        while self.isLaunch:
            if self.inPip:
                # Wipe the current input line before printing the message.
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                sys.stdout.flush()
                print self.inPip.pop()
                # linux special
                sys.stdout.write('\r')
                sys.stdout.flush()
                self.reprint_input()
            time.sleep(0.01)
    def fast_input_test(self):
        # Non-blocking getch: a ~1ms timer fires thread.interrupt_main() to
        # break out of the blocking read; returns None when no key was pending.
        timer = threading.Timer(0.001, thread.interrupt_main)
        c = None
        try:
            timer.start()
            c = getch()
        except:
            pass
        timer.cancel()
        return c
    def process_direction_char(self, c):
        # Handle an arrow key; c is the final byte of the ANSI escape
        # sequence ('A'..'D'), or a Windows scan code which is mapped first.
        if OS == 'Windows':
            if ord(c) == 72:
                c = 'A'
            elif ord(c) == 80:
                c = 'B'
            elif ord(c) == 77:
                c = 'C'
            elif ord(c) == 75:
                c = 'D'
        if ord(c) == 68: # LEFT
            # LEFT is treated as backspace; real cursor movement below is
            # disabled by the early return.
            self.process_char('\b')
            return
            # cursor bugs
            if self.cursor > 0:
                if OS == 'Windows':
                    sys.stdout.write(chr(224) + chr(75))
                else:
                    sys.stdout.write(chr(27) + '[C')
                self.cursor -= 1
        elif ord(c) == 67: # RIGHT
            # Cursor movement disabled (dead code kept below the return).
            return
            # cursor bugs
            if self.cursor < len(self.strBuff):
                if OS == 'Windows':
                    sys.stdout.write(chr(224) + chr(77))
                else:
                    sys.stdout.write(chr(27) + '[D')
                self.cursor += 1
        elif ord(c) == 65: # UP
            # Replace the current input line with the previous history entry.
            hc = self.get_history_command('UP')
            if not hc is None:
                self.strBuff = [i for i in hc]
                self.cursor = len(hc)
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                self.reprint_input()
        elif ord(c) == 66: # DOWN
            # Replace the current input line with the next history entry.
            hc = self.get_history_command('DOWN')
            if not hc is None:
                self.strBuff = [i for i in hc]
                self.cursor = len(hc)
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                self.reprint_input()
        else:
            raise Exception(c)
    def process_char(self, c):
        # Dispatch a single keystroke.
        if ord(c) == 27: # Esc
            if OS == 'Linux':
                # Peek ahead: ESC [ A..D is an arrow-key escape sequence.
                fitc1 = self.fast_input_test()
                if ord(fitc1) == 91:
                    fitc2 = self.fast_input_test()
                    if 65 <= ord(fitc2) <= 68:
                        self.process_direction_char(fitc2)
                        return
            # Bare ESC: forward it, then replay any look-ahead chars consumed.
            sys.stdout.write('\r' + ' ' * 50 + '\r')
            sys.stdout.flush()
            self.reprint_input()
            self.outPip.append(c)
            time.sleep(0.02)
            # NOTE(review): dir() with no args lists *local* names, so these
            # tests check whether the look-ahead reads above actually ran.
            if 'fitc1' in dir():
                self.process_char(fitc1)
                self.cursor += 1
            if 'fitc2' in dir():
                self.process_char(fitc2)
                self.cursor += 1
        elif ord(c) == 3: # Ctrl+C
            # Pause the prompt and offer to quit.
            self.stop()
            self.isPause = True
            if raw_input('Exit?(y) ') == 'y':
                sys.stdout.write('Command Line Exit')
            else:
                self.start()
                self.isPause = False
        elif ord(c) in (8, 127): # Backspace
            if self.strBuff:
                if ord(self.strBuff[-1]) < 128:
                    sys.stdout.write('\b \b')
                else:
                    # Non-ASCII char: back up two screen columns.
                    sys.stdout.write('\b\b \b')
                    if OS == 'Linux':
                        # Presumably a multi-byte encoding: drop the extra
                        # buffered bytes -- TODO confirm encoding assumption.
                        self.strBuff.pop()
                        self.strBuff.pop()
                self.strBuff.pop()
                self.cursor -= 1
        elif c == '\n':
            # Enter: publish the buffered line as a command.
            if self.strBuff:
                if self.inputMaintain:
                    sys.stdout.write(c)
                else:
                    sys.stdout.write('\r' + ' ' * 50 + '\r')
                sys.stdout.flush()
                self.reprint_input()
                self.output_command(''.join(self.strBuff))
                self.strBuff = []
                self.historyCmd = -1
        elif ord(c) == 224: # Windows direction
            # 0xE0 prefixes an extended-key scan code on Windows consoles.
            if OS == 'Windows':
                direction = self.getch()
                self.process_direction_char(direction)
        else:
            # Ordinary character: echo it and append to the line buffer.
            sys.stdout.write(c)
            sys.stdout.flush()
            self.strBuff.append(c)
            self.cursor += 1
    def command_thread(self):
        # Worker loop: read and process keystrokes until stopped.
        c = None
        while self.isLaunch:
            c = self.getch()
            self.process_char(c)
            time.sleep(0.01)
    def start(self):
        # Launch the printer and reader threads and draw the prompt.
        self.isLaunch = True
        thread.start_new_thread(self.print_thread, ())
        self.reprint_input()
        thread.start_new_thread(self.command_thread, ())
    def stop(self):
        # Clear the input line and signal both worker threads to exit.
        sys.stdout.write('\r' + ' ' * 50 + '\r')
        sys.stdout.flush()
        self.isLaunch = False
    def print_line(self, msg = None):
        # Queue a message for display above the input line.
        self.inPip.append(msg)
    def clear(self):
        # Clear the whole terminal and redraw the prompt.
        os.system('cls' if platform.system() == 'Windows' else 'clear')
        self.reprint_input()
    def get_command_pip(self):
        # Expose the queue that receives completed commands.
        return self.outPip
    def set_header(self, header):
        # Change the prompt prefix shown on the input line.
        self.header = header
if __name__ == '__main__':
    # Demo: start the prompt, inject a message every 3 seconds, and echo
    # back every command the user enters.
    c = ChatLikeCMD()
    s = c.get_command_pip()
    c.start()
    def loopinput(c):
        # Background producer: periodically queue a line for display.
        while True:
            c.print_line('LOOP INPUT......')
            time.sleep(3)
    thread.start_new_thread(loopinput, (c,))
    # Main loop: echo completed commands back until the prompt is stopped.
    while c.isLaunch or c.isPause:
        if s:
            c.print_line(s.pop())
        time.sleep(0.01)
| [
"i7meavnktqegm1b@qq.com"
] | i7meavnktqegm1b@qq.com |
e9d20af65be43f8bf9ec9fb4ea874002c82bf2e2 | 22b906ca2dab20d8b88e58a5bc862ddc15960f05 | /src/python/app/controllers/sample_controller.py | 951eda108863580add8f7a796efcc5e2b11da0e3 | [] | no_license | itsumura-h/speed_test | 3705d7e587362f14ed7cbc33e0e5b6463e3d94da | 57c999e7833cfc1e3abb48bf46e77df48732e1a1 | refs/heads/master | 2022-12-15T13:11:15.125585 | 2019-09-03T14:14:53 | 2019-09-03T14:14:53 | 204,280,512 | 0 | 0 | null | 2022-04-22T22:15:41 | 2019-08-25T10:42:30 | Python | UTF-8 | Python | false | false | 268 | py | from django.http.response import JsonResponse
from ..services.domain_services.sample_service import SampleService
class SampleController:
    """Thin HTTP layer that delegates to SampleService and serialises to JSON."""

    def fib(self, num):
        """Return the fib payload for *num* (coerced to int) as a JsonResponse."""
        term = int(num)
        payload = SampleService().fib(term)
        return JsonResponse(payload)
| [
"dumblepy@gmail.com"
] | dumblepy@gmail.com |
f9529e9350359629ea1dd8c1d73962ffbc7413c0 | d930edd227427e03931a9d4be2997bfaea8cb3a6 | /unit_10/talk.py | 6e153a52f84943baf0c4afb8752eed45e8b71b7f | [] | no_license | mikeselezniov/python-21v | 2d95c5a87c9d1e9371577127d4dfbc03a1b77d7f | 8c5753ccdc07492ea9cf46acac643c9e9674d4c7 | refs/heads/master | 2020-12-28T04:38:54.456159 | 2015-07-05T19:06:24 | 2015-07-05T19:06:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # -*- coding:utf-8 -*-
# функция в Python'e может быть определена… внутри другой функции!
def talk():
    # A function in Python can be defined... inside another function!
    def whisper(word="да"):
        return word.lower()+"...";
    # ...and used right away!
    print whisper()
# Now, EVERY time "talk" is called, the inner function "whisper" is
# defined and then invoked inside it.
talk() # prints: "да..."
# But outside of "talk" there is NO function named "whisper":
try:
    print whisper()
except NameError, e:
    print e
#выведет : "name 'whisper' is not defined" | [
"janusnic@gmail.com"
] | janusnic@gmail.com |
05a625ebd18925ce38954b5a657ed70269ef23fd | f7c82725ae83896385109ffa1476eb98e411b13e | /setup.py | cee2f8da328b2ae8f2a02d7dd08b28374400aa18 | [
"MIT"
] | permissive | myousefi2016/vtkInterface | f173fda29648ee28fb8e0ba39d3724a6d4ef6205 | 3c18f4a5fe25b67f918809fd0589b80bbf3bff1d | refs/heads/master | 2021-06-20T03:41:42.666862 | 2017-08-04T12:44:52 | 2017-08-04T12:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | """
Installation file for python vtkInterface module
"""
from setuptools import setup
import os
from io import open as io_open
package_name = 'vtkInterface'

# Read __version__ from vtkInterface/_version.py without importing the
# package (importing could fail before its dependencies are installed).
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), package_name, '_version.py')
with io_open(version_file, mode='r') as fd:
    # execute file from raw string
    exec(fd.read())

# Actual setup
setup(
    name=package_name,
    packages = [package_name, 'vtkInterface.tests', 'vtkInterface.examples'],

    # Version
    version=__version__,

    description='Easier Pythonic interface to VTK',
    long_description=open('README.rst').read(),
    # long_description=open('pypiREADME.rst').read(),

    # Author details
    author='Alex Kaszynski',
    author_email='akascap@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',

        # Target audience
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Information Analysis',

        # MIT License
        'License :: OSI Approved :: MIT License',

        # Untested, but will probably work for other python versions
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],

    # Website
    url = 'https://github.com/akaszynski/vtkInterface',

    keywords='vtk numpy plotting mesh',

    # Example meshes shipped with the package.
    package_data={'vtkInterface.examples': ['airplane.ply', 'ant.ply',
                                            'hexbeam.vtk', 'sphere.ply']},

    install_requires=['numpy'],
)
| [
"akascap@gmail.com"
] | akascap@gmail.com |
be70b94decd74015163cc6d0d9c11389e0116e44 | 0bb1d74bac2872b76fb7ae5bfb40e36ecac7cfa2 | /py/funcion.py | 5e4a47cda6a6e1d6f2f02f8dbe8ac97b457d41d4 | [] | no_license | antalcides/migit | 965349fa53f4e2c99419fc15ae2e3c2e6c9cc3cf | e9b611e1ba91a63d52b14efb9963eec7f4c20d75 | refs/heads/master | 2021-07-13T04:59:38.732109 | 2020-09-05T03:39:24 | 2020-09-05T03:39:24 | 74,794,023 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 16:09:00 2015
@author: antalcides
"""
from math import* # in main
def f(x):
    """Damped sinusoid: exp(-0.1*x) * sin(6*pi*x)."""
    envelope = exp(-0.1 * x)
    oscillation = sin(6 * pi * x)
    return envelope * oscillation  # in main
# Evaluate the damped sinusoid at a sample point (result printed below).
x = 2
y = f(x)
print 'f(%g)=%g' % (x, y) | [
"antalcides@gmail.com"
] | antalcides@gmail.com |
7fa8c4b0e4c9540f36fbf566ad99d48e913d1c26 | 885feec0699da96fcfa1e118adffbc94b4f31fd1 | /src/tbm_utils/path.py | dc75a16cd2a6428dad91d7e6043bacbfaaea12e7 | [
"MIT"
] | permissive | ddboline/tbm-utils | d48bfdb98737a4a45def81143bf13fa11f00f6d3 | 7f77bc25651079bc8884de1cfcb45e28d672fb16 | refs/heads/master | 2020-08-29T11:43:49.571204 | 2019-10-16T20:05:55 | 2019-10-16T20:05:55 | 218,022,543 | 0 | 0 | MIT | 2019-10-28T10:34:34 | 2019-10-28T10:34:34 | null | UTF-8 | Python | false | false | 664 | py | __all__ = [
'UNIX_PATH_RE',
'convert_unix_path'
]
import re
from pathlib import Path
UNIX_PATH_RE = re.compile(r'(/(cygdrive/)?)(.*)')
"""Regex pattern matching UNIX-style filepaths."""


def convert_unix_path(filepath):
    """Convert a Unix-style filepath to Windows format.

    A leading ``/`` or ``/cygdrive/`` is stripped and the first remaining
    component becomes the drive letter (``/c/Users`` -> ``C:/Users``).
    Inputs that do not start with ``/`` are returned with each forward
    slash replaced by a double backslash.

    Parameters:
        filepath (str, os.PathLike, Path): A filepath.

    Returns:
        Path: A Windows path object.
    """
    match = UNIX_PATH_RE.match(str(filepath))
    if not match:
        # Coerce to str before replace(): the original called
        # filepath.replace('/', ...) directly, which breaks for Path
        # inputs (Path.replace renames files and takes one argument).
        return Path(str(filepath).replace('/', r'\\'))

    # First component is the drive letter; the rest are path segments.
    drive, *rest = match.group(3).split('/')
    return Path(f"{drive.upper()}:/", *rest)
| [
"mail@thebigmunch.me"
] | mail@thebigmunch.me |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.