blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b867f04b4d5338ab74beb5d363063aedb323f8bc
|
9b2255e0a474555d8a4d90f586e280d40224a181
|
/apps/navigation/api.py
|
c97f598f1ff10310113981f52a3913081ccd3fed
|
[] |
no_license
|
rogeriofalcone/redirector
|
85f496f7c3a3c755b2d9f86f90d25ace783842e4
|
8255be80ce4e3245317864dcc580a1ef68a7c244
|
refs/heads/master
| 2020-04-08T07:03:19.053680
| 2012-08-12T19:13:35
| 2012-08-12T19:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
import copy
# NOTE(review): `copy` appears unused in this file — confirm before removing.
# Module-level registries populated by the register_* helpers below and
# consumed by the navigation templates/tags elsewhere in the app.
object_navigation = {}  # menu_name -> {source: {'links': [...]}}
multi_object_navigation = {}  # menu_name -> {source: {'links': [...]}} (multi-item actions)
model_list_columns = {}  # model -> [column definitions]
sidebar_templates = {}  # source -> [template names]
top_menu_entries = []  # [{'name', 'link', 'position', ...}], kept sorted by position
def register_multi_item_links(src, links, menu_name=None):
    """
    Register links for multi-item actions shown in the generic list
    template, under the given menu (None selects the default menu).
    """
    menu = multi_object_navigation.setdefault(menu_name, {})
    sources = src if hasattr(src, '__iter__') else [src]
    for source in sources:
        entry = menu.setdefault(source, {'links': []})
        entry['links'].extend(links)
def register_links(src, links, menu_name=None, position=None):
    """
    Associate links with a model, a view, or a URL, optionally inserting
    them at a fixed position within the menu's existing link list.
    """
    menu = object_navigation.setdefault(menu_name, {})
    sources = src if hasattr(src, '__iter__') else [src]
    for source in sources:
        entry = menu.setdefault(source, {'links': []})
        if position is None:
            entry['links'].extend(links)
        else:
            # Insert in reverse so the final order matches `links`.
            for link in reversed(links):
                entry['links'].insert(position, link)
def register_top_menu(name, link, children_views=None,
                      children_path_regex=None, children_view_regex=None,
                      position=None):
    """
    Register an entry for the main menu rendered at the top of the page;
    returns the entry dict that was stored.
    """
    entry = {'name': name, 'link': link}
    # Optional child matchers are only stored when provided (truthy).
    for key, value in (('children_views', children_views),
                       ('children_path_regex', children_path_regex),
                       ('children_view_regex', children_view_regex)):
        if value:
            entry[key] = value
    if position is None:
        # Append at the end, recording the implicit position.
        entry['position'] = len(top_menu_entries)
        top_menu_entries.append(entry)
    else:
        entry['position'] = position
        top_menu_entries.insert(position, entry)
    sort_menu_entries()
    return entry
def sort_menu_entries():
    # Re-order the top menu by explicit position. Entries with a negative
    # position sort after the non-negative ones (False < True in the key
    # tuple), ascending within each group. Note this rebinds the module
    # global rather than sorting in place.
    global top_menu_entries
    top_menu_entries = sorted(top_menu_entries, key=lambda k: (k['position'] < 0, k['position']))
def register_model_list_columns(model, columns):
    """
    Declare the columns the generic list template renders for `model`.
    Repeated calls keep extending the same column list.
    """
    model_list_columns.setdefault(model, []).extend(columns)
def register_sidebar_template(source_list, template_name):
    """Attach a sidebar template to every source in `source_list`."""
    for src in source_list:
        sidebar_templates.setdefault(src, []).append(template_name)
|
[
"Roberto.Rosario.Gonzalez@gmail.com"
] |
Roberto.Rosario.Gonzalez@gmail.com
|
fbe5e008b57babb92e2dd06f1be7edb60f2f4dac
|
299e5934971f9de638692e2667d6e270bcab5cbd
|
/13.罗马数字转整数.py
|
6aa2f8131eac1ddc741f6bd0b4331f804a96ec00
|
[] |
no_license
|
ycj123/Leetcode-Python3
|
14bcd6c9f4d26191d5d40c77e923df4d0be4c0e5
|
1593960cdf2655ef1dcf68e3517e7121670c6ac3
|
refs/heads/master
| 2022-12-16T23:12:19.326702
| 2020-09-18T00:17:45
| 2020-09-18T00:17:45
| 295,302,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
#
# @lc app=leetcode.cn id=13 lang=python3
#
# [13] 罗马数字转整数
#
# 优先匹配长度为2
# @lc code=start
class Solution:
    """LeetCode 13: convert a Roman numeral string to its integer value."""

    def romanToInt(self, s: str) -> int:
        # Subtractive two-character pairs (IV, IX, ...) are in the table
        # and matched greedily before single symbols.
        values = {
            'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,
            'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900,
        }
        total = 0
        pos = 0
        n = len(s)
        while pos < n:
            pair = s[pos:pos + 2]
            if pair in values:
                total += values[pair]
                pos += 2
            else:
                total += values[s[pos]]
                pos += 1
        return total
# @lc code=end
|
[
"yangchijiang@icloud.com"
] |
yangchijiang@icloud.com
|
9ebb2d8c25c8e4c6f8e7973204a6127aa4844a89
|
40530b1d97c237944e4a01826f2e7a746d98acc0
|
/modifydevicetree.py
|
e555e1cd8a21521c84bf59a3b2062e527d8e0050
|
[
"CC0-1.0"
] |
permissive
|
Hackveda/XNUQEMUScripts
|
e0270b27a3cb889822a8429a0842e548507a34c8
|
d5492cbffe2c1ce29026901b833c0d9cd295f37b
|
refs/heads/master
| 2021-01-05T22:40:01.532712
| 2018-07-21T17:07:54
| 2018-07-21T17:07:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
import sys
from devicetreefromim4p import *
#removeCompatibles = [b"aic,1", b"pmgr1,t8015", b"wdt,t8015\x00wdt,s5l8960x", b"gpio,t8015\x00gpio,s5l8960x", b"sochot,s5l8955x", b"tempsensor,t8015", b"aes,s8000"]
# Device-tree "compatible" values to preserve; every other compatible
# string is blanked out by writeproperty() below.
keepCompatibles = [b"uart-1,samsung", b"D22AP\x00iPhone10,3\x00AppleARM"]
# Nodes whose "name"/"device_type" value matches one of these get their
# first byte replaced with '~' (disabling the node) in writeproperty().
removeNames = [b"wdt", b"backlight"]
removeDeviceTypes = [b"wdt", b"backlight"]
# pexpert/pexpert/device_tree.h
def u32(a, i):
    """Read a little-endian unsigned 32-bit integer from a[i:i+4]."""
    return sum(a[i + k] << (8 * k) for k in range(4))
def w32(a, i, v):
    """Write v into a[i:i+4] as a little-endian unsigned 32-bit integer."""
    for k in range(4):
        a[i + k] = (v >> (8 * k)) & 0xff
def writenode(nodebytes, nodeoffset, nodedepth):
    """
    Recursively walk (and rewrite in place) one device-tree node starting
    at `nodeoffset`; returns the total byte length consumed by the node,
    its properties and its children.
    """
    n_props = u32(nodebytes, nodeoffset)
    n_children = u32(nodebytes, nodeoffset + 4)
    consumed = 8  # skip the two u32 counters
    for _ in range(n_props):
        consumed += writeproperty(nodebytes, nodeoffset + consumed, nodedepth)
    for _ in range(n_children):
        consumed += writenode(nodebytes, nodeoffset + consumed, nodedepth + 1)
    return consumed
def padStringNull(instr, lenstr=32):
    """ASCII-encode `instr` and NUL-pad it on the right to `lenstr` bytes."""
    return instr.encode("ascii").ljust(lenstr, b"\x00")
def writeproperty(nodebytes, nodeoffset, nodedepth):
    """
    Rewrite one device-tree property in place and return its padded byte
    length. Layout: 32-byte NUL-padded name, u32 length, `length` bytes
    of value, padded up to a 4-byte boundary.
    """
    kPropNameLength = 32
    propname = nodebytes[nodeoffset:nodeoffset + kPropNameLength].rstrip(b"\x00").decode("utf-8")
    ptr = kPropNameLength
    # The top bit of the length word is masked off and, when set, written
    # back cleared. NOTE(review): presumably a flag bit per
    # pexpert/device_tree.h — confirm its meaning there.
    proplen = u32(nodebytes, nodeoffset + ptr) & 0x7fffffff
    if u32(nodebytes, nodeoffset + ptr) != proplen:
        w32(nodebytes, nodeoffset + ptr, proplen)
    ptr += 4
    # Give the guest a timebase value if the device tree left it at 0.
    if propname == "timebase-frequency" and u32(nodebytes, nodeoffset + ptr) == 0:
        print("setting timebase")
        w32(nodebytes, nodeoffset + ptr, (1000 * 1000 * 1000)//16)
    if propname == "random-seed":
        print("setting random seed")
        w32(nodebytes, nodeoffset + ptr, 0xdeadf00d)
    # The two renames below neutralize a property by overwriting its name
    # with a harmless one and zeroing its value; offsets stay intact
    # because the name field is fixed at 32 bytes.
    if propname == "dram-vendor-id":
        print("Removing dram-vendor-id")
        nodebytes[nodeoffset:nodeoffset + kPropNameLength] = padStringNull("chip-epoch")
        nodebytes[nodeoffset + ptr:nodeoffset + ptr + proplen] = b"\x00" * proplen
    if propname == "display-corner-radius":
        print("Removing display-corner-radius")
        nodebytes[nodeoffset:nodeoffset + kPropNameLength] = padStringNull("security-domain")
        nodebytes[nodeoffset + ptr:nodeoffset + ptr + proplen] = b"\x00" * proplen
    # Blank out every "compatible" value not explicitly kept (value minus
    # its trailing NUL is compared against keepCompatibles).
    if propname == "compatible" and not nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1] in keepCompatibles:
        print("removing compatible for", nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1].decode("ascii"))
        nodebytes[nodeoffset+ptr:nodeoffset + ptr + proplen - 1] = b"~" * (proplen - 1)
    # Corrupt the first byte of unwanted node names / device types.
    if propname == "name" and nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1] in removeNames:
        print("removing name for", nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1].decode("ascii"))
        nodebytes[nodeoffset+ptr] = ord("~")
    if propname == "device_type" and nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1] in removeDeviceTypes:
        print("removing device type for", nodebytes[nodeoffset+ptr:nodeoffset+ptr+proplen-1].decode("ascii"))
        nodebytes[nodeoffset+ptr] = ord("~")
    ptr += proplen
    ptr = (ptr + 0x3) & ~0x3 #round up to nearest 4
    return ptr
def printone(filename, outname):
    """Extract the device tree from an im4p file, rewrite it in place, and
    save only the bytes actually used by the tree."""
    with open(filename, "rb") as infile:
        raw = infile.read()
    treebytes = bytearray(devicetreefromim4p(raw))
    used = writenode(treebytes, 0, 0)
    with open(outname, "wb") as outfile:
        outfile.write(treebytes[:used])
# CLI entry point: modifydevicetree.py <input im4p> <output devicetree blob>
if __name__ == "__main__":
    printone(sys.argv[1], sys.argv[2])
|
[
"zhuoweizhang@yahoo.com"
] |
zhuoweizhang@yahoo.com
|
95509adab1d7fd1668388a55b0f78c9747fc7365
|
23744d0acc0119d0222c003179335af5b3259a67
|
/DOTA_configs/_base_/datasets/NV10.py
|
5ce6a3f114d7cd51496ba0e3c9b8a205c7f78373
|
[
"Apache-2.0"
] |
permissive
|
yawudede/mmd_rs
|
f3db3c0288eea67b78fdde09600d9b1ef4e60478
|
a0a468933e69bf90d9bc71c37c8626d8dda7fd24
|
refs/heads/master
| 2023-03-18T01:00:11.029808
| 2021-03-23T03:41:14
| 2021-03-23T03:41:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
# mmdetection-style dataset config for NWPU VHR-10 stored in COCO format.
dataset_type = 'CocoDataset'
data_root = 'data/NWPU_VHR_10/'
# ImageNet mean/std, RGB order.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Test-time pipeline: single scale, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(800, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# The 10 NWPU VHR-10 categories, in annotation-id order.
cat_name_list =['airplane', 'ship', 'storage tank', 'baseball diamond', 'tennis court',
                'basketball court', 'ground track field', 'harbor', 'bridge', 'vehicle']
num_classes = len(cat_name_list) # 10
# NOTE(review): val and test both point at test_coco_ann.json — validation
# is performed on the test split; confirm intended.
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'train_val_coco_ann.json',
        img_prefix=data_root + 'images',
        classes=cat_name_list,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'test_coco_ann.json',
        img_prefix=data_root + 'images',
        classes=cat_name_list,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'test_coco_ann.json',
        img_prefix=data_root + 'images',
        classes=cat_name_list,
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
max_bbox_per_img = 100
|
[
"alexhzy@126.com"
] |
alexhzy@126.com
|
c98407abf255ab9a3130d1de2aec9835447aed79
|
d1ea752332028ad6e621b68c32496cad5ae33fa4
|
/backend/testing_module_options_1/urls.py
|
89bfdd81e27b4afcd715afbaecd9cc0510ba8d08
|
[] |
no_license
|
crowdbotics-apps/testing-module-options-1
|
872d24a6dd8e34c0b73b61e066b3187f2a4adfab
|
1a061e46115787df3b1013aeb5282891f825b473
|
refs/heads/master
| 2023-07-27T02:01:22.644918
| 2021-09-16T14:50:17
| 2021-09-16T14:50:17
| 407,200,684
| 0
| 0
| null | 2021-09-16T14:51:11
| 2021-09-16T14:40:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
"""testing_module_options_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Project URL routes: app includes first, then admin/auth, then API docs,
# and finally the SPA catch-all.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "testing module options"
admin.site.site_title = "testing module options Admin Portal"
admin.site.index_title = "testing module options Admin"
# swagger
api_info = openapi.Info(
    title="testing module options API",
    default_version="v1",
    description="API documentation for testing module options App",
)
# Swagger UI is restricted to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA shell at the root.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
# Catch-all: any otherwise-unmatched path also renders the SPA shell.
# NOTE(review): this swallows 404s for every remaining path — confirm intended.
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
5c1df1d7fc9d51ab054ea9607fffab52a876136c
|
55a729cee20bb37f9a2bab323b1afb148e0768d2
|
/pyigm/cgm/cgmsurvey.py
|
d2d1529f9d090035854331d413db2ec16708549b
|
[] |
no_license
|
SunilSimha/pyigm
|
502b1d46533dfec60dacfa41498b38bcc9622298
|
cd09dedce40cf959ed15eaecd4f01f05856dd9f2
|
refs/heads/master
| 2021-01-25T06:24:29.282634
| 2017-06-01T18:54:39
| 2017-06-01T18:54:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,740
|
py
|
""" Classes for CGM Surveys
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os
import warnings
import pdb
import json, io
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from pyigm.utils import lst_to_array
from pyigm.surveys.igmsurvey import GenericIGMSurvey
from pyigm.cgm.cgm import CGMAbsSys
try:
basestring
except NameError: # For Python 3
basestring = str
class CGMAbsSurvey(object):
    """A CGM Survey class in absorption

    Attributes
    ----------
    survey : str, optional
      Survey name
    ref : str, optional
      Reference(s)
    cgm_abs : list
      List of CGMAbsSys objects making up the survey
    mask : array-like or None
      Optional boolean mask applied to attribute arrays
    """

    @classmethod
    def from_tarball(cls, tfile, debug=False, **kwargs):
        """ Load the COS-Halos survey from a tarball of JSON files

        Parameters
        ----------
        tfile : str
          Path to the tarball of per-system JSON files
        debug : bool, optional
          If True, stop after reading a handful of members

        Returns
        -------
        slf : CGMAbsSurvey
        """
        import tarfile
        import json
        from linetools.lists.linelist import LineList
        llist = LineList('ISM')

        slf = cls(**kwargs)
        # Load
        tar = tarfile.open(tfile)
        for kk, member in enumerate(tar.getmembers()):
            if '.' not in member.name:
                print('Skipping a likely folder: {:s}'.format(member.name))
                continue
            # Debug
            if debug and (kk == 5):
                break
            # Extract
            f = tar.extractfile(member)
            tdict = json.load(f)
            # Generate a system from its dict; consistency checks are off
            # because the JSON was written by this same package.
            cgmsys = CGMAbsSys.from_dict(tdict, chk_vel=False, chk_sep=False, chk_data=False,
                                         use_coord=True, use_angrho=True,
                                         linelist=llist, **kwargs)
            slf.cgm_abs.append(cgmsys)
        tar.close()
        # Return
        return slf

    def __init__(self, survey='', ref='', **kwargs):
        """
        Parameters
        ----------
        survey : str, optional
        ref : str, optional
        """
        # Name of survey
        self.survey = survey
        self.ref = ref
        self.cgm_abs = []
        self.mask = None

    @property
    def nsys(self):
        """ Number of systems

        Returns
        -------
        nsys : int
        """
        return len(self.cgm_abs)

    def to_json_tarball(self, outfil):
        """ Generates a gzipped tarball of JSON files, one per system

        Parameters
        ----------
        outfil : str
          Output filename for the tarball
        """
        import subprocess
        tmpdir = 'CGM_JSON'
        try:
            os.mkdir(tmpdir)
        except OSError:  # already exists
            pass
        jfiles = []

        # Loop on systems
        for cgm_abs in self.cgm_abs:
            # Dict
            cdict = cgm_abs.to_dict()
            # Temporary JSON file
            json_fil = tmpdir+'/'+cgm_abs.name+'.json'
            jfiles.append(json_fil)
            with io.open(json_fil, 'w', encoding='utf-8') as f:
                # Bug fix: the original called the Python 2-only builtin
                # unicode(), which raises NameError under Python 3. io.open
                # in text mode needs unicode text on both interpreters.
                jdata = json.dumps(cdict, sort_keys=True, indent=4,
                                   separators=(',', ': '))
                if isinstance(jdata, bytes):  # Python 2: dumps returns str
                    jdata = jdata.decode('utf-8')
                f.write(jdata)
        # Tar
        warnings.warn("Modify to write directly to tar file")
        subprocess.call(['tar', '-czf', outfil, tmpdir])
        print('Wrote: {:s}'.format(outfil))

        # Clean up
        for jfile in jfiles:
            os.remove(jfile)
        os.rmdir(tmpdir)

    def ion_tbl(self, Zion, fill_ion=True):
        """ Generate a Table of Ionic column densities for an input ion

        Parameters
        ----------
        Zion : tuple or str
        fill_ion : bool, optional
          Fill each ionN table in the survey (a bit slow)

        Returns
        -------
        tbl : astropy.Table
        """
        from linetools.abund.ions import name_to_ion
        if isinstance(Zion, basestring):
            Zion = name_to_ion(Zion)
        # Generate dummy IGMSurvey
        dumb = GenericIGMSurvey()
        names = []
        for cgmabs in self.cgm_abs:
            if fill_ion:
                cgmabs.igm_sys.fill_ionN()
            if cgmabs.igm_sys._ionN is not None:
                dumb._abs_sys.append(cgmabs.igm_sys)
                # Names (kept in lockstep with the appended systems)
                names.append(cgmabs.name)
        # Run ions
        tbl = dumb.ions(Zion)
        # Add CGM name
        tbl.add_column(Column(names, name='cgm_name'))
        # Return
        return tbl

    def trans_tbl(self, inp, fill_ion=True):
        """ Generate a Table of Data on a given transition, e.g. SiIII 1206

        Parameters
        ----------
        inp : str or Quantity
          str -- Name of the transition, e.g. 'CII 1334'
          Quantity -- Rest wavelength of the transition, e.g. 1334.53*u.AA to 0.01 precision

        Returns
        -------
        tbl : astropy.Table
        """
        # Generate dummy IGMSurvey
        dumb = GenericIGMSurvey()
        names = []
        for cgmabs in self.cgm_abs:
            dumb._abs_sys.append(cgmabs.igm_sys)
            # Names
            names.append(cgmabs.name)
        # Run
        tbl = dumb.trans(inp)
        # Add CGM name
        tbl.add_column(Column(names, name='cgm_name'))
        # Return
        return tbl

    def abs_kin(self, lbl):
        """ Create a Table of the Kinematic info

        Parameters
        ----------
        lbl : string
          Label for the Kinematics dict

        TODO:
          Add wrest!!
        """
        from astropy.table import Table
        # NOTE(review): `.keys` without a call implies kin[lbl] exposes
        # `keys` as a list attribute (matching `key_dtype` below) rather
        # than being a plain dict — confirm against the Kin class.
        keys = self.cgm_abs[0].igm_sys.kin[lbl].keys
        t = Table(names=keys,
                  dtype=self.cgm_abs[0].igm_sys.kin[lbl].key_dtype)

        for cgm_abs in self.cgm_abs:
            try:
                kdict = cgm_abs.igm_sys.kin[lbl]
            except KeyError:
                # No dict. Filling in zeros
                row = [0 for key in keys]
                t.add_row( row )
                continue
            # Filling
            row = [kdict[key] for key in keys]
            t.add_row( row )
        return t

    def __getattr__(self, k):
        """ Vectorize attribute lookup over the member systems.

        Searches the CGMAbsSys objects, then their galaxies, then their
        igm_sys, returning an array (masked by self.mask if set).
        """
        # Try Self first
        try:
            lst = [getattr(cgm_abs, k) for cgm_abs in self.cgm_abs]
        except AttributeError:
            # Galaxy?
            try:
                lst = [getattr(cgm_abs.galaxy, k) for cgm_abs in self.cgm_abs]
            except AttributeError:
                # Try AbsLine_Sys last
                try:
                    lst = [getattr(cgm_abs.igm_sys, k) for cgm_abs in self.cgm_abs]
                except AttributeError:
                    print('cgm.core: Attribute not found!')
                    pdb.set_trace()
        # Special cases: coordinates are bundled into one SkyCoord
        if k == 'coord':
            ra = [coord.fk5.ra.value for coord in lst]
            dec = [coord.fk5.dec.value for coord in lst]
            lst = SkyCoord(ra=ra, dec=dec, unit='deg')
            if self.mask is not None:
                return lst[self.mask]
            else:
                return lst
        elif k == 'scoord':  # Sightline coordinates
            lst = [getattr(cgm_abs.igm_sys, 'coord') for cgm_abs in self.cgm_abs]
            ra = [coord.fk5.ra.value for coord in lst]
            dec = [coord.fk5.dec.value for coord in lst]
            lst = SkyCoord(ra=ra, dec=dec, unit='deg')
            if self.mask is not None:
                return lst[self.mask]
            else:
                return lst
        # Return array
        return lst_to_array(lst, mask=self.mask)

    def __repr__(self):
        str1 = '<CGM_Survey: {:s} nsys={:d}, ref={:s}>\n'.format(self.survey, self.nsys, self.ref)
        for ii in range(self.nsys):
            str1 = str1+self.cgm_abs[ii].igm_sys.__repr__()+'\n'
        return str1
|
[
"xavier@ucolick.org"
] |
xavier@ucolick.org
|
e33058af373d06e2f01f0dedd2779cbce9ab58b6
|
cef082f9fd218e807ad6deedfc95b485fe4152a0
|
/SecondWeek/example_http.py
|
601ec43cf64d916ec85671ec7715c03db19b71b8
|
[] |
no_license
|
banziha104/DjangoProjects
|
bdb814dbcaa9a0c50d05b42ee7e319a5129dfef9
|
802e412fcabedd5e08abbf14f2587bd2231491ff
|
refs/heads/master
| 2021-01-23T05:29:17.563194
| 2017-10-17T10:28:16
| 2017-10-17T10:28:16
| 102,469,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# python3 -m http.server 8000
# localhost:8000
# python3 -m http.server 8000 --bind 127.0.0.1
# python3 -m http.server --cgi 8000
# Minimal static-file HTTP server example: serves the current working
# directory on PORT until interrupted.
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
# TCPServer as a context manager (Python 3.6+) closes the socket on exit.
with socketserver.TCPServer(("",PORT), Handler) as httpd:
    print("serving at port", PORT)
    httpd.serve_forever()
|
[
"they6687@naver.com"
] |
they6687@naver.com
|
3cd443b57e159232737995be844e35ac1210e2c3
|
72b77f97876983025eb05a5aa1d6f248a1be3074
|
/difference_between_element_sum_and_digit_sum_of_an_array.py
|
8bac67b7f16134a05d0c93079b75484b8335f70b
|
[
"Apache-2.0"
] |
permissive
|
erjan/coding_exercises
|
4c6bccb2cdac65ccbc3107a482914275ecd157f7
|
68dac358a6d4dabd41d47dbd4addb2ec50e0ca11
|
refs/heads/master
| 2023-09-02T07:25:30.886175
| 2023-08-27T06:13:06
| 2023-08-27T06:13:06
| 236,281,070
| 5
| 0
|
Apache-2.0
| 2020-05-05T15:08:49
| 2020-01-26T07:32:09
|
Python
|
UTF-8
|
Python
| false
| false
| 727
|
py
|
'''
You are given a positive integer array nums.
The element sum is the sum of all the elements in nums.
The digit sum is the sum of all the digits (not necessarily distinct) that appear in nums.
Return the absolute difference between the element sum and digit sum of nums.
Note that the absolute difference between two integers x and y is defined as |x - y|.
'''
class Solution:
    def differenceOfSum(self, nums: List[int]) -> int:
        """Return |element sum - digit sum| for a list of positive ints."""
        element_total = sum(nums)
        # Digit sum: every decimal digit of every number, summed.
        digit_total = sum(int(ch) for n in nums for ch in str(n))
        return abs(element_total - digit_total)
|
[
"noreply@github.com"
] |
erjan.noreply@github.com
|
958cfc9e90e536eeeaddfdc8e2dc87dfd64c8875
|
faa83d63a23aec7c4f45c6ce6d140985a9fb2d50
|
/tests/conftest.py
|
7baac17e56d03fac2affcd5502b8e75d9cc40e56
|
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
thomasyi17/diana2
|
dbf23382f5f84bd9cf86ce531f46452f0083e7f6
|
983e58ef0a5fe0d820a56c41c823369754019171
|
refs/heads/master
| 2023-03-24T15:13:29.421614
| 2022-06-12T21:42:28
| 2022-06-12T21:42:28
| 167,248,482
| 0
| 0
|
MIT
| 2019-06-25T19:41:36
| 2019-01-23T20:22:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
import sys
import pytest
import docker
from diana.utils.endpoint import Containerized
sys.path.append('utils')
@pytest.fixture(scope="session")
def setup_orthanc0():
    # Session-wide Orthanc container on the default ports (8042/4242);
    # torn down once when the test session ends.
    S = mk_orthanc()
    yield S
    print("Tearing down orthanc fixture")
    S.stop_container()
@pytest.fixture(scope="session")
def setup_orthanc1():
    # Second Orthanc on 8043/4243, configured to peer back at the first
    # instance's ports (8042/4242).
    S = mk_orthanc(8043, 4243, 8042, 4242)
    yield S
    print("Tearing down orthanc fixture")
    S.stop_container()
def mk_orthanc(http_port=8042, dcm_port=4242, remote_peer=8043, remote_mod=4243):
    """
    Start an Orthanc DICOM server container and return its Containerized
    handle. Calling twice with mirrored ports yields two peered servers.
    """
    print("Standing up orthanc fixture")
    dkr_name = "orthanc-{}".format(http_port)
    # Resolve this host's IP so the container can reach its peer.
    import socket
    host_name = socket.gethostname()
    host_ip = socket.gethostbyname(host_name)
    S = Containerized(
        dkr_name = dkr_name,
        dkr_image = "derekmerck/orthanc-confd",
        dkr_ports = {"8042/tcp": http_port, "4242/tcp": dcm_port},
        # NOTE(review): ORTHANC_MOD_0 uses remote_mod for both the AET
        # suffix and the port, while remote_peer only feeds the HTTP peer
        # URL — looks intentional (modality = DICOM port, peer = HTTP
        # port) but worth confirming.
        dkr_env = {"ORTHANC_MOD_0": "mod0,ORTHANC{},{},{}".format(
            remote_mod, host_ip, remote_mod),
                   "ORTHANC_PEER_0": "peer0,http://{}:{},orthanc,passw0rd!".format(
            host_ip, remote_peer),
                   "ORTHANC_AET": "ORTHANC{}".format(dcm_port),
                   "ORTHANC_VERBOSE": "true"}
    )
    S.start_container()
    # Log the container status so a failed start is visible in output.
    client = docker.from_env()
    c = client.containers.get(dkr_name)
    print("{}: {}".format(S.dkr_name, c.status))
    return S
@pytest.fixture(scope="session")
def setup_redis():
    # Session-wide Redis container; torn down when the session ends.
    S = mk_redis()
    yield S
    print("Tearing down redis fixture")
    S.stop_container()
def mk_redis():
    """Start a Redis container on the default port and return its handle."""
    print("Standing up redis fixture")
    dkr_name = "redis"
    S = Containerized(
        dkr_name = dkr_name,
        dkr_image = "redis",
        dkr_ports = {"6379/tcp": 6379}
    )
    S.start_container()
    # Log the container status so a failed start is visible in output.
    client = docker.from_env()
    c = client.containers.get(dkr_name)
    print("{}: {}".format(S.dkr_name, c.status))
    return S
|
[
"derek_merck@brown.edu"
] |
derek_merck@brown.edu
|
32138084abc6dd3ae53877add615df18640ec606
|
92f69f1f33f6b3aa29dc4f3ccce7d4a06eb24bdf
|
/deploy/infer_onnx_tensorrt.py
|
be847017b35e952bf6f78ecc7da8ac525342cab4
|
[
"MIT"
] |
permissive
|
carlsummer/lcnn
|
5d0b4c81e3b626e0380fdd36ad5685f3a6b9eb8f
|
b7ad7fa5502243ac50ca15a355e0001c5992d050
|
refs/heads/master
| 2023-06-21T05:45:44.910052
| 2021-07-29T00:55:33
| 2021-07-29T00:55:33
| 384,020,516
| 0
| 0
|
MIT
| 2021-07-08T06:13:42
| 2021-07-08T06:13:41
| null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:7/26/2021 10:12 AM
# @File:infer_net1_onnx.py
import argparse
import onnx
import onnx_tensorrt.backend as backend
import torch
from imutils import paths
from deploy.torch2onnx import get_image
if __name__ == "__main__":
    # CLI: run a line-detection ONNX model through the onnx-tensorrt
    # backend on one sample image. (argparse help strings are runtime
    # strings and are left untouched.)
    parser = argparse.ArgumentParser(description="获取杭州人工认为有缺陷大图")
    parser.add_argument('--devices',
                        default=r"0",
                        help="没有分的文件夹")
    parser.add_argument('--onnx_path',
                        default=r"/home/zengxh/workspace/lcnn/logs/210726-144038-88f281a-baseline/checkpoint_best.onnx",
                        help="没有分的文件夹")
    parser.add_argument('--predict_dir',
                        default=r"/home/zengxh/medias/data/ext/creepageDistance/20210714/smallimg/tb/org",
                        help="没有分的文件夹")
    parser.add_argument('--predict_type',
                        default=r"tb",
                        help="没有分的文件夹")
    opt = parser.parse_args()
    # Build a TensorRT execution engine for the loaded ONNX model.
    model = onnx.load(opt.onnx_path)
    engine = backend.prepare(model, device='CUDA:0')
    # Only the first image of the prediction folder is run.
    image_paths = list(paths.list_images(opt.predict_dir))
    image_path = image_paths[0]
    image = get_image(image_path, opt.predict_type).cuda()
    # NOTE(review): junc/jtyp/Lpos are built but never passed to
    # engine.run() — presumably leftovers from the torch entry point;
    # confirm before removing.
    junc = torch.zeros(1, 2).cuda()
    jtyp = torch.zeros(1, dtype=torch.uint8).cuda()
    Lpos = torch.zeros(2, 2, dtype=torch.uint8).cuda()
    ret = engine.run(image)
    print(ret)
|
[
"zengxh@chint.com"
] |
zengxh@chint.com
|
7306eaafeb9996f56c25f6148352a8b7c266f68f
|
fcfc5d6b6fe509072ace7522a8b48e7c3a8d80a8
|
/api/upload/urls.py
|
0f1e7170bdc27c20e7ede08af83e29b54b07da69
|
[] |
no_license
|
Torque-Webdev/ChamipnshpDatabase
|
0323d13d39f27aa5516b68891e3cfde51ad063f4
|
a3f44f7786d98afb58744e0410904750906f5ae0
|
refs/heads/master
| 2023-05-01T22:19:14.180758
| 2021-05-21T16:41:16
| 2021-05-21T16:41:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
from django.urls import path
from .views import FileViewSet
from rest_framework import routers
from django.urls import path, include
# Namespace used when reversing these routes (e.g. "upload:...").
app_name = "upload"
router = routers.DefaultRouter()
# DRF router generates the standard viewset routes under /file/.
router.register('file', FileViewSet)
urlpatterns = [
    path('', include(router.urls))
]
|
[
"sibbengaharry@gmail.com"
] |
sibbengaharry@gmail.com
|
d118b958141f7a74f558cabfc48f8c64170a1520
|
f87f51ec4d9353bc3836e22ac4a944951f9c45c0
|
/.history/HW03_20210706182647.py
|
1346d8dad64a99274877a7859b1338bb7f9edce1
|
[] |
no_license
|
sanjayMamidipaka/cs1301
|
deaffee3847519eb85030d1bd82ae11e734bc1b7
|
9ddb66596497382d807673eba96853a17884d67b
|
refs/heads/main
| 2023-06-25T04:52:28.153535
| 2021-07-26T16:42:44
| 2021-07-26T16:42:44
| 389,703,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
"""
Georgia Institute of Technology - CS1301
HW03 - Strings and Lists
Collaboration Statement:
"""
#########################################
"""
Function Name: movieNight()
Parameters: subtitle (str)
Returns: fixed subtitle (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def movieNight(subtitle):
    """Return `subtitle` with every decimal digit character removed."""
    return ''.join(ch for ch in subtitle if not ch.isdigit())
"""
Function Name: longestWord()
Parameters: sentence (str)
Returns: longest word (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def longestWord(sentence):
    """
    Return the longest word in `sentence` (words are split on spaces;
    the first longest word wins ties).

    Bug fix: the original called max() without a key, which picks the
    lexicographically greatest word, not the longest one.
    """
    return max(sentence.split(' '), key=len)
"""
Function Name: tennisMatch()
Parameters: player1 (str), player2 (str), matchRecord (str)
Returns: game statement (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: freshFruit()
Parameters: barcodes (list), startIndex (int), stopIndex (int)
Returns: freshest barcode (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: highestSum()
Parameters: stringList (list)
Returns: highest sum index (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
# subtitle = "Mr. and M4rs. Dursley of nu28mber four, Privet Drive, wer903e proud to say th6at they we6re perfectly norm3al, tha894nk you ve89ry much."
# print(movieNight(subtitle))
# Ad-hoc manual check; note the result of longestWord() is discarded.
sentence = " Left foot, right foot, levitatin’ "
longestWord(sentence)
|
[
"sanjay.mamidipaka@gmail.com"
] |
sanjay.mamidipaka@gmail.com
|
68da85645730e39e77b20e6b4145c05c4e7f8e65
|
c8b427f7d548d2028911682ec1fcdcd0150fd1c3
|
/encoding/datasets/cocostuff.py
|
e3e06d059fbc752baa2bd5098006bcabb120653b
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
yougoforward/GSFramework
|
544f120681777752cda9dcd4872170118e2f6073
|
000060691fcf252cbdf834326db19415f9754cdf
|
refs/heads/master
| 2022-12-21T07:59:48.571069
| 2020-07-10T12:57:34
| 2020-07-10T12:57:34
| 271,774,293
| 0
| 1
|
NOASSERTION
| 2022-12-19T09:48:36
| 2020-06-12T10:44:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,726
|
py
|
###########################################################################
# Created by: CASIA IVA
# Email: jliu@nlpr.ia.ac.cn
# Copyright (c) 2018
###########################################################################
import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
import re
from tqdm import tqdm
from .base import BaseDataset
class CocostuffSegmentation(BaseDataset):
    """COCO-Stuff semantic segmentation dataset (171 classes).

    Mask labels are shifted down by one in _mask_transform (label 0
    becomes -1, presumably the ignored index — confirm against the loss).
    """
    BASE_DIR = 'cocostuff'
    NUM_CLASS = 171
    def __init__(self, root='./datasets', split='train',
                 mode=None, transform=None, target_transform=None, **kwargs):
        super(CocostuffSegmentation, self).__init__(
            root, split, mode, transform, target_transform, **kwargs)
        # assert exists
        root = os.path.join(root, self.BASE_DIR)
        assert os.path.exists(root), "Please download the dataset!!"
        self.images, self.masks = _get_cocostuff_pairs(root, split)
        # 'vis' split carries no masks, so the pairing check is skipped.
        if split != 'vis':
            assert (len(self.images) == len(self.masks))
        if len(self.images) == 0:
            raise (RuntimeError("Found 0 images in subfolders of: \
                " + root + "\n"))
    def __getitem__(self, index):
        # Returns (img, mask) normally, or (img, filename) in 'vis' mode.
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'vis':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = Image.open(self.masks[index])
        # synchrosized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            mask = self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask
    def _mask_transform(self, mask):
        # Shift 1-based labels to 0-based (label 0 maps to -1).
        target = np.array(mask).astype('int32')-1
        # target[target == 255] = -1
        return torch.from_numpy(target).long()
    def __len__(self):
        return len(self.images)
    @property
    def pred_offset(self):
        # Offset added back onto predictions when saving results.
        return 0
def _get_cocostuff_pairs(folder, split='train'):
    """
    Collect (image paths, mask paths) for a cocostuff split by reading
    its listing file: 'train'/'val'/'test' -> <split>.txt, anything else
    -> all.txt. Lines whose mask file is missing are skipped with a
    warning.
    """
    def read_pairs(listing):
        imgs = []
        masks = []
        with open(listing, 'r') as lines:
            for line in tqdm(lines):
                fields = re.split(' ', line)
                img = os.path.join(folder, fields[0].lstrip('/').lstrip().rstrip())
                mask = os.path.join(folder, fields[1].lstrip('/').rstrip())
                if os.path.isfile(mask):
                    imgs.append(img)
                    masks.append(mask)
                else:
                    print('cannot find the mask:', mask)
        return imgs, masks

    if split in ('train', 'val', 'test'):
        listing_name = split + '.txt'
    else:
        listing_name = 'all.txt'
    return read_pairs(os.path.join(folder, listing_name))
|
[
"908865817@qq.com"
] |
908865817@qq.com
|
74c32424b84aeca23312e1bc1337aeae4ce88b19
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5769900270288896_0/Python/Yizow/noisyNeighbors.py
|
56699b486ce068641289c157a81da28e58850b66
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
import sys
def main():
    """Read the contest input file (argv[1]) and write answers to argv[2].

    Input format: first line is the number of test cases; each case is a
    line "R C N".  Output: "Case #i: <answer>" per case, separated by
    newlines (no trailing newline after the last case).
    """
    inputFileName = sys.argv[1]
    outputFileName = sys.argv[2]
    with open(inputFileName, 'r') as inputFile:
        with open(outputFileName, 'w') as outputFile:
            numTestCases = int(inputFile.readline())
            for testNum in range(numTestCases):
                if testNum > 0:
                    outputFile.write("\n")
                # Progress indicator.  print(x) with a single argument is
                # identical on Python 2 and 3; the bare `print testNum`
                # statement form was Python-2-only and a SyntaxError on 3.
                print(testNum)
                line1 = inputFile.readline().split()
                R, C, N = int(line1[0]), int(line1[1]), int(line1[2])
                outputFile.write("Case #%d: %d" % (testNum+1, calcHappy(R,C,N)))
def calcHappy(R, C, N):
    """Minimum total number of walls shared by two occupied rooms when
    placing N tenants in an R x C grid (Code Jam "Noisy Neighbors" style).

    Fix: all integer divisions now use ``//``.  Under Python 2 the original
    ``/`` was floor division for ints, but under Python 3 it silently
    became float division, corrupting every threshold comparison.  ``//``
    preserves the Python 2 behaviour on both interpreters.
    """
    if N == R*C:
        # Grid completely full: every interior wall is shared.
        return totalWalls(R, C)
    if N <= (R*C + 1) // 2:
        # A checkerboard placement fits everyone with no shared walls.
        return 0
    empty = R*C - N
    if min(R, C) == 1:
        # Single row/column: each empty room suppresses 2 shared walls.
        return totalWalls(R, C) - empty * 2
    if min(R, C) == 2:
        length = max(R, C)
        if empty == length - 1:
            return totalWalls(R, C) - (3*empty - 1)
        return totalWalls(R, C) - (3 * empty)
    inR, inC = R-2, C-2
    innerRooms = inR * inC
    if innerRooms == 1:
        # 3x3 grid: hand-computed answers for the remaining cases.
        if N == 8:
            return totalWalls(R, C) - 4
        if N == 7:
            return totalWalls(R, C) - 6
        if N == 6:
            return totalWalls(R, C) - 9
    if (innerRooms + 1) // 2 >= empty:
        # All empties fit on interior checkerboard cells (4 walls each).
        return totalWalls(R, C) - empty*4
    happy = 4 * ((innerRooms + 1) // 2)
    empty -= (innerRooms + 1) // 2
    # Half the boundary (edge) rooms can also host empties, 3 walls each.
    # R*2 + (C-2)*2 is always even, so // matches the original exactly.
    edgeRooms = (R*2 + (C-2)*2) // 2
    if R % 2 == 0 or C % 2 == 0:
        edgeRooms += 2
    if empty < edgeRooms:
        happy += 3*empty
        return totalWalls(R, C) - happy
    # Deliberate contest-time sentinel: raise ZeroDivisionError if a case
    # somehow falls through every branch (kept to preserve behaviour).
    return 1/0
def totalWalls(R, C):
    """Total number of interior walls in an R x C grid."""
    return R*(C-1) + C*(R-1)
main()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
e636ed229257f4495131784688b2d673f17fedc4
|
86f8b6366fefdc91f9bd658efcb292001fe02da0
|
/src/rosbag_to_file/scripts/rosbag_to_csv.py
|
cbc70a3e2768d76ec7e4b9324606857c49befc69
|
[
"MIT"
] |
permissive
|
mingtsung86/dodobot-ros
|
32a1a42fbf5b7b9e7415f5e4ba21e9574827e5ba
|
85971caeac958db2f592eeeca13b01b4d365eebd
|
refs/heads/master
| 2023-06-09T08:07:22.867868
| 2021-07-02T05:58:03
| 2021-07-02T05:58:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,946
|
py
|
import csv
import rospy
from datetime import datetime
import utils
def bag_to_csv(options):
    """Dump every topic in the bag at ``options.path`` to its own CSV file.

    A csv.writer (and its open file handle) is created lazily per topic.
    Each data row is prefixed with a human-readable date and the raw
    timestamp.  When ``options.header`` is set, a header row is written
    once per topic and message values are quoted rather than flattened.
    """
    writers = dict()
    for topic, msg, timestamp in utils.enumerate_bag(options, options.path):
        if topic in writers:
            writer = writers[topic][0]
        else:
            f = open_csv(options, topic)
            writer = csv.writer(f)
            writers[topic] = writer, f
            # BUG FIX: the header must be written only when the writer is
            # first created for a topic; previously this block sat outside
            # the else-branch and emitted a header row before EVERY message.
            if options.header:
                header_row = ["date", "time"]
                message_type_to_csv(header_row, msg)
                writer.writerow(header_row)
        row = [
            utils.to_datestr(timestamp.to_time()),
            timestamp.to_time()
        ]
        message_to_csv(row, msg, flatten=not options.header)
        writer.writerow(row)
    for writer, f in writers.values():
        f.close()
def open_csv(options, topic_name):
    """Open (for writing) the CSV file corresponding to *topic_name*."""
    base_path = utils.get_output_path(options, topic_name)
    return open(base_path + ".csv", 'w')
def message_to_csv(row, msg, flatten=False):
    """Append each leaf value of *msg* to *row* (a list of strings).

    Values containing a comma are either stripped of tuple punctuation
    (flatten=True) or wrapped in double quotes (flatten=False) so they
    survive the CSV round-trip.
    """
    for key, value in utils.iter_msg(msg):
        msg_str = str(value)
        # BUG FIX: the original tested `msg_str.find(",") is not -1` --
        # an *identity* comparison against an int literal, which is
        # implementation-dependent and a SyntaxWarning since Python 3.8.
        # A membership test expresses the intent directly.
        if "," in msg_str:
            if flatten:
                msg_str = msg_str.strip("(")
                msg_str = msg_str.strip(")")
                msg_str = msg_str.strip(" ")
            else:
                msg_str = "\"" + msg_str + "\""
        row.append(msg_str)
def format_header_key(key):
    """Render a field path such as ("pose", 0, "x") as "pose[0].x".

    Integer path elements become "[i]" index markers; string elements are
    joined with dots, except the first, which has no leading dot.
    """
    parts = []
    for position, piece in enumerate(key):
        if type(piece) == int:
            parts.append("[%s]" % piece)
        elif position == 0:
            parts.append(piece)
        else:
            parts.append("." + piece)
    return "".join(parts)
def message_type_to_csv(row, msg, parent_content_name=""):
    """Append one formatted header entry to *row* per leaf field of *msg*.

    ``parent_content_name`` is unused but retained for call compatibility.
    """
    row.extend(format_header_key(key) for key, _value in utils.iter_msg(msg))
|
[
"woz4tetra@gmail.com"
] |
woz4tetra@gmail.com
|
e5a0abd466402d924ea93b9c01766280787d1b41
|
5ac15873df4df7aeef3908ea7aebe0805cca0181
|
/flask_mrbob/templates/project/+project.name+/base/context_processors.py
|
833ad7be23d69c4d1f2722e10bb1eaacf874d3c6
|
[
"BSD-3-Clause"
] |
permissive
|
jstacoder/flask-manage
|
0a942cef5a32b2966dc73cf14c5a58682b0af0f2
|
76f1802c4b7042c482dcdcc884fbc0e4fd114b5e
|
refs/heads/master
| 2020-04-19T18:18:11.872007
| 2014-07-02T15:24:38
| 2014-07-02T15:24:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# -*- coding: utf-8 -*-
"""
base.context_processors
~~~~~~~~~~~~~~~~~~~~~~~
The most common context processors
for the whole project.
"""
from flask.helpers import url_for
def common_context():
    """Template context shared across the whole project: the contact
    e-mail plus the `type` and `dir` builtins (useful when debugging
    objects from inside templates)."""
    context = {'my_email': 'kyle@level2designs.com'}
    context['type'] = type
    context['dir'] = dir
    return context
def common_forms():
    """No project-wide forms yet; contributes an empty context."""
    return dict()
|
[
"kyle@level2designs.com"
] |
kyle@level2designs.com
|
b9095da8a1ed6d8c33f37c41cd94d2c31737a97d
|
cc2f91415451ba988a009c0e68303ef6a0b083c1
|
/trydjango/settings.py
|
adefa67a5c8bcd1037fcebd4c8ffbb300e50b2d1
|
[] |
no_license
|
Jordan-Rob/Jcfe-Django-tutorial
|
12a4ed169c37020cef2708202cb2e2279ae5a691
|
fa970f92943cd613c455d8db187a22ce3508e7d4
|
refs/heads/master
| 2020-12-21T13:35:24.264645
| 2020-01-28T16:44:12
| 2020-01-28T16:44:12
| 236,446,098
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
"""
Django settings for trydjango project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yv#a7f8-6c7fgt_)bmng@o2igm$60ym-#y=nd$6z1=%n3_!k7j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'products.apps.ProductsConfig',
'pages.apps.PagesConfig',
'Blog.apps.BlogConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trydjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trydjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'trydjango',
'USER': 'root',
'PASSWORD': 'thinking23house',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"jordanrob709@gmail.com"
] |
jordanrob709@gmail.com
|
008b269902e9e83c6d68e09034f245162da4c7ff
|
2342b8737b9ffeb9715158b8ec74a33c7a4947f6
|
/koku/masu/util/azure/common.py
|
8456c9274467140607943451c04e58784967db68
|
[
"Apache-2.0"
] |
permissive
|
project-koku/koku
|
444d8df05da5416c9cee606c42481c99be45f13d
|
0416e5216eb1ec4b41c8dd4999adde218b1ab2e1
|
refs/heads/main
| 2023-08-20T11:30:17.510182
| 2023-08-17T18:27:30
| 2023-08-17T18:27:30
| 126,496,611
| 225
| 94
|
Apache-2.0
| 2023-09-14T17:38:08
| 2018-03-23T14:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,156
|
py
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Common util functions."""
import datetime
import logging
import re
import uuid
from enum import Enum
from itertools import chain
import pandas as pd
from django_tenants.utils import schema_context
from api.models import Provider
from masu.database.azure_report_db_accessor import AzureReportDBAccessor
from masu.database.provider_db_accessor import ProviderDBAccessor
from masu.util.ocp.common import match_openshift_labels
LOG = logging.getLogger(__name__)
# Column sets used to validate customer-supplied (ingress) Azure cost
# reports; an uploaded report must provide one of these schemas.
# Legacy Azure export schema (UsageDateTime / PreTaxCost style).
INGRESS_REQUIRED_COLUMNS = {
    "SubscriptionGuid",
    "ResourceGroup",
    "ResourceLocation",
    "UsageDateTime",
    "MeterCategory",
    "MeterSubcategory",
    "MeterId",
    "MeterName",
    "MeterRegion",
    "UsageQuantity",
    "ResourceRate",
    "PreTaxCost",
    "ConsumedService",
    "ResourceType",
    "InstanceId",
    "OfferId",
    "AdditionalInfo",
    "ServiceInfo1",
    "ServiceInfo2",
    "ServiceName",
    "ServiceTier",
    "Currency",
    "UnitOfMeasure",
}
# Newer Azure export schema (Date / CostInBillingCurrency style).
INGRESS_ALT_COLUMNS = {
    "SubscriptionId",
    "ResourceGroup",
    "ResourceLocation",
    "Date",
    "MeterCategory",
    "MeterSubCategory",
    "MeterId",
    "MeterName",
    "MeterRegion",
    "UnitOfMeasure",
    "Quantity",
    "EffectivePrice",
    "CostInBillingCurrency",
    "ConsumedService",
    "ResourceId",
    "OfferId",
    "AdditionalInfo",
    "ServiceInfo1",
    "ServiceInfo2",
    "ResourceName",
    "ReservationId",
    "ReservationName",
    "UnitPrice",
    "PublisherType",
    "PublisherName",
    "ChargeType",
    "BillingAccountId",
    "BillingAccountName",
    "BillingCurrencyCode",
    "BillingPeriodStartDate",
    "BillingPeriodEndDate",
    "ServiceFamily",
}
class AzureBlobExtension(Enum):
    """File-name suffixes used to classify blobs in Azure cost exports."""
    manifest = "_manifest.json"
    csv = ".csv"
    json = ".json"
def extract_uuids_from_string(source_string):
    """
    Extract uuids out of a given source string.
    Args:
        source_string (Source): string to locate UUIDs.
    Returns:
        ([]) List of UUIDs found in the source string
    """
    # RFC-4122 shape: 8-4-4-4-12 hex groups with version/variant nibbles.
    uuid_pattern = re.compile(
        "[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}",
        re.IGNORECASE,
    )
    return uuid_pattern.findall(source_string)
def get_local_file_name(cur_key):
    """
    Return the local file name for a given cost usage report key.
    If an assemblyID is present in the key, it will prepend it to the filename.
    Args:
        cur_key (String): reportKey value from manifest file.
        example:
            With AssemblyID: /koku/20180701-20180801/882083b7-ea62-4aab-aa6a-f0d08d65ee2b/koku-1.csv.gz
            Without AssemblyID: /koku/20180701-20180801/koku-Manifest.json
    Returns:
        (String): file name for the local file,
                example:
                    With AssemblyID: "882083b7-ea62-4aab-aa6a-f0d08d65ee2b-koku-1.csv.gz"
                    Without AssemblyID: "koku-Manifest.json"
    """
    # Everything after the final slash is the file name.
    return cur_key.rsplit("/", 1)[-1]
def get_bills_from_provider(provider_uuid, schema, start_date=None, end_date=None):
    """
    Return the Azure bill IDs given a provider UUID.
    Args:
        provider_uuid (str): Provider UUID.
        schema (str): Tenant schema
        start_date (datetime): Start date for bill IDs.
        end_date (datetime) End date for bill IDs.
    Returns:
        (list): Azure cost entry bill objects.
    """
    # Normalize date/datetime bounds to "YYYY-MM-DD" strings; the lower
    # bound is snapped to the first day of its month.
    if isinstance(start_date, (datetime.datetime, datetime.date)):
        start_date = start_date.replace(day=1).strftime("%Y-%m-%d")
    if isinstance(end_date, (datetime.datetime, datetime.date)):
        end_date = end_date.strftime("%Y-%m-%d")
    with ProviderDBAccessor(provider_uuid) as provider_accessor:
        provider = provider_accessor.get_provider()
    azure_types = (Provider.PROVIDER_AZURE, Provider.PROVIDER_AZURE_LOCAL)
    if provider.type not in azure_types:
        LOG.warning(f"Provider UUID is not an Azure type. It is {provider.type}")
        return []
    with AzureReportDBAccessor(schema) as report_accessor:
        with schema_context(schema):
            bill_query = report_accessor.get_cost_entry_bills_query_by_provider(provider.uuid)
            if start_date:
                bill_query = bill_query.filter(billing_period_start__gte=start_date)
            if end_date:
                bill_query = bill_query.filter(billing_period_start__lte=end_date)
            return list(bill_query.all())
def match_openshift_resources_and_labels(data_frame, cluster_topologies, matched_tags):
    """Filter a dataframe to the subset that matches an OpenShift source.

    data_frame: Azure line items; must have 'resourceid', 'instanceid' and
        'tags' columns (tags presumably serialized as lowercase-able
        strings -- TODO confirm upstream format).
    cluster_topologies: iterable of dicts with 'nodes' and
        'persistent_volumes' name lists.
    matched_tags: list of {key: value} dicts of tags known to match.

    Returns the matching rows with bookkeeping columns ('resource_id_matched',
    'matched_tag', a fresh 'uuid') added.
    """
    # Names of every node and PV across all clusters; a line item whose
    # resource id contains any of them is considered a match.
    nodes = chain.from_iterable(cluster_topology.get("nodes", []) for cluster_topology in cluster_topologies)
    volumes = chain.from_iterable(
        cluster_topology.get("persistent_volumes", []) for cluster_topology in cluster_topologies
    )
    matchable_resources = list(nodes) + list(volumes)
    resource_id_df = data_frame["resourceid"]
    # Some exports populate 'instanceid' instead of 'resourceid'.
    if resource_id_df.isna().values.all():
        resource_id_df = data_frame["instanceid"]
    LOG.info("Matching OpenShift on Azure by resource ID.")
    resource_id_matched = resource_id_df.str.contains("|".join(matchable_resources))
    data_frame["resource_id_matched"] = resource_id_matched
    tags = data_frame["tags"]
    tags = tags.str.lower()
    # Well-known openshift_* tag keys count as a match on their own.
    special_case_tag_matched = tags.str.contains(
        "|".join(["openshift_cluster", "openshift_project", "openshift_node"])
    )
    data_frame["special_case_tag_matched"] = special_case_tag_matched
    if matched_tags:
        tag_keys = []
        tag_values = []
        for tag in matched_tags:
            tag_keys.extend(list(tag.keys()))
            tag_values.extend(list(tag.values()))
        # Coarse prefilter: row mentions some matched key AND some matched
        # value (not necessarily paired); exact pairing is checked below.
        tag_matched = tags.str.contains("|".join(tag_keys)) & tags.str.contains("|".join(tag_values))
        data_frame["tag_matched"] = tag_matched
        any_tag_matched = tag_matched.any()
        if any_tag_matched:
            tag_df = pd.concat([tags, tag_matched], axis=1)
            tag_df.columns = ("tags", "tag_matched")
            tag_subset = tag_df[tag_df.tag_matched == True].tags  # noqa: E712
            LOG.info("Matching OpenShift on Azure tags.")
            # Exact key/value matching only on the prefiltered subset.
            matched_tag = tag_subset.apply(match_openshift_labels, args=(matched_tags,))
            data_frame["matched_tag"] = matched_tag
            data_frame["matched_tag"].fillna(value="", inplace=True)
        else:
            data_frame["matched_tag"] = ""
    else:
        data_frame["tag_matched"] = False
        data_frame["matched_tag"] = ""
    # Keep rows matched by resource id, special-case tag, or exact tag.
    openshift_matched_data_frame = data_frame[
        (data_frame["resource_id_matched"] == True)  # noqa: E712
        | (data_frame["special_case_tag_matched"] == True)  # noqa: E712
        | (data_frame["matched_tag"] != "")  # noqa: E712
    ]
    # Stable row identifier for downstream processing.
    openshift_matched_data_frame["uuid"] = openshift_matched_data_frame.apply(lambda _: str(uuid.uuid4()), axis=1)
    openshift_matched_data_frame = openshift_matched_data_frame.drop(
        columns=["special_case_tag_matched", "tag_matched"]
    )
    return openshift_matched_data_frame
|
[
"noreply@github.com"
] |
project-koku.noreply@github.com
|
ce7cef500f46983152daf5c63552763ab26651fc
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/third_party/google/auth/metrics.py
|
f7303282c9410598e9ec61290b667f7e8fed6423
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,089
|
py
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" We use x-goog-api-client header to report metrics. This module provides
the constants and helper methods to construct x-goog-api-client header.
"""
import platform
from google.auth import version
API_CLIENT_HEADER = "x-goog-api-client"
# Auth request type
# Tokens appended to the metric header identifying what kind of auth
# request is being made.
REQUEST_TYPE_ACCESS_TOKEN = "auth-request-type/at"
REQUEST_TYPE_ID_TOKEN = "auth-request-type/it"
REQUEST_TYPE_MDS_PING = "auth-request-type/mds"
REQUEST_TYPE_REAUTH_START = "auth-request-type/re-start"
REQUEST_TYPE_REAUTH_CONTINUE = "auth-request-type/re-cont"
# Credential type
# Tokens identifying which credential flavor produced the request.
CRED_TYPE_USER = "cred-type/u"
CRED_TYPE_SA_ASSERTION = "cred-type/sa"
CRED_TYPE_SA_JWT = "cred-type/jwt"
CRED_TYPE_SA_MDS = "cred-type/mds"
CRED_TYPE_SA_IMPERSONATE = "cred-type/imp"
# Versions
def python_and_auth_lib_version():
    """Return the base metric token "gl-python/<py ver> auth/<lib ver>"."""
    return f"gl-python/{platform.python_version()} auth/{version.__version__}"
# Token request metric header values
# x-goog-api-client header value for access token request via metadata server.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/mds"
def token_request_access_token_mds():
    """Header value for an access-token request via the metadata server."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_ACCESS_TOKEN} {CRED_TYPE_SA_MDS}"
# x-goog-api-client header value for ID token request via metadata server.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/mds"
def token_request_id_token_mds():
    """Header value for an ID-token request via the metadata server."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_ID_TOKEN} {CRED_TYPE_SA_MDS}"
# x-goog-api-client header value for impersonated credentials access token request.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/imp"
def token_request_access_token_impersonate():
    """Header value for an impersonated-credentials access-token request."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_ACCESS_TOKEN} {CRED_TYPE_SA_IMPERSONATE}"
# x-goog-api-client header value for impersonated credentials ID token request.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/imp"
def token_request_id_token_impersonate():
    """Header value for an impersonated-credentials ID-token request."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_ID_TOKEN} {CRED_TYPE_SA_IMPERSONATE}"
# x-goog-api-client header value for service account credentials access token
# request (assertion flow).
# Example: "gl-python/3.7 auth/1.1 auth-request-type/at cred-type/sa"
def token_request_access_token_sa_assertion():
    """Header value for a service-account (assertion flow) access-token request."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_ACCESS_TOKEN} {CRED_TYPE_SA_ASSERTION}"
# x-goog-api-client header value for service account credentials ID token
# request (assertion flow).
# Example: "gl-python/3.7 auth/1.1 auth-request-type/it cred-type/sa"
def token_request_id_token_sa_assertion():
    """Header value for a service-account (assertion flow) ID-token request."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_ID_TOKEN} {CRED_TYPE_SA_ASSERTION}"
# x-goog-api-client header value for user credentials token request.
# Example: "gl-python/3.7 auth/1.1 cred-type/u"
def token_request_user():
    """Header value for a user-credentials token request."""
    return f"{python_and_auth_lib_version()} {CRED_TYPE_USER}"
# Miscellenous metrics
# x-goog-api-client header value for metadata server ping.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/mds"
def mds_ping():
    """Header value for a metadata-server availability ping."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_MDS_PING}"
# x-goog-api-client header value for reauth start endpoint calls.
# Example: "gl-python/3.7 auth/1.1 auth-request-type/re-start"
def reauth_start():
    """Header value for calls to the reauth *start* endpoint."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_REAUTH_START}"
# x-goog-api-client header value for reauth continue endpoint calls.
# Example: "gl-python/3.7 auth/1.1 cred-type/re-cont"
def reauth_continue():
    """Header value for calls to the reauth *continue* endpoint."""
    return f"{python_and_auth_lib_version()} {REQUEST_TYPE_REAUTH_CONTINUE}"
def add_metric_header(headers, metric_header_value):
    """Add x-goog-api-client header with the given value.

    Args:
        headers (Mapping[str, str]): The headers to which we will add the
            metric header.
        metric_header_value (Optional[str]): If value is None, do nothing;
            if headers already has a x-goog-api-client header, append the value
            to the existing header; otherwise add a new x-goog-api-client
            header with the given value.
    """
    if not metric_header_value:
        return
    existing = headers.get(API_CLIENT_HEADER)
    if existing is None:
        headers[API_CLIENT_HEADER] = metric_header_value
    else:
        headers[API_CLIENT_HEADER] = existing + " " + metric_header_value
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
3106a84c83a2be4575d24d0c7b941bb53fec2b47
|
4024383a9ba300d266611c61e358dd72880350ae
|
/tests/tests_top/test_topchart_music.py
|
18588c4711e15462f6272b5bfdbcde80654e45c4
|
[
"MIT"
] |
permissive
|
dbeley/senscritiquescraper
|
54bed18dc7c9348a780c15338ebd472cf33feff1
|
8e199422475c44f0dbef53f2471066c39afec949
|
refs/heads/master
| 2023-08-16T18:12:10.008142
| 2023-08-04T20:51:04
| 2023-08-04T20:51:04
| 192,126,896
| 15
| 1
|
MIT
| 2022-12-08T12:40:47
| 2019-06-15T21:52:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
from senscritiquescraper.utils.row_utils import row_utils
def test_music_rank(topchart_row_music):
    # The top entry of the chart is ranked first.
    assert row_utils.get_rank(topchart_row_music) == "1"
def test_music_title(topchart_row_music):
    # Album title is scraped verbatim.
    assert row_utils.get_title(topchart_row_music) == "The Dark Side of the Moon"
def test_music_url(topchart_row_music):
    # The album link must be an absolute https URL.
    assert row_utils.get_url(topchart_row_music).startswith("https")
def test_music_year(topchart_row_music):
    # Release year comes back as a string.
    assert row_utils.get_year(topchart_row_music) == "1973"
def test_music_release_date(topchart_row_music):
    # Baseline field 0 of a music row carries the (French) release date.
    assert row_utils.get_baseline_0(topchart_row_music) == "23 mars 1973"
def test_music_genre(topchart_row_music):
    # Baseline field 1 carries the genre string exactly as scraped
    # (including the site's missing space in "etprog").
    assert row_utils.get_baseline_1(topchart_row_music) == "Art rock etprog rock"
def test_music_number_songs(topchart_row_music):
    # For music rows the "seasons" accessor holds the track count.
    assert row_utils.get_number_of_seasons(topchart_row_music) == "10 morceaux"
def test_music_cover(topchart_row_music):
    # Cover art must be served over https.
    assert row_utils.get_picture_url(topchart_row_music).startswith("https")
def test_music_artist(topchart_row_music):
    # For music rows the "producer" accessor holds the artist name.
    assert row_utils.get_producer(topchart_row_music) == "Pink Floyd"
def test_music_average_rating(topchart_row_music):
    # Rating renders as three characters, e.g. "8.4".
    assert len(row_utils.get_average_rating(topchart_row_music)) == 3
def test_music_number_ratings(topchart_row_music):
    # Rating count only ever grows; check a safe lower bound.
    assert int(row_utils.get_number_of_ratings(topchart_row_music)) > 35000
|
[
"6568955+dbeley@users.noreply.github.com"
] |
6568955+dbeley@users.noreply.github.com
|
4e83059941f89096991608bfcdb404009f4e710c
|
55ceefc747e19cdf853e329dba06723a44a42623
|
/_CodeTopics/LeetCode_contest/weekly/weekly2020/193/WA--193_2.py
|
8a599c9e2ac46086ec219372aec9f53a4f8504f7
|
[] |
no_license
|
BIAOXYZ/variousCodes
|
6c04f3e257dbf87cbe73c98c72aaa384fc033690
|
ee59b82125f100970c842d5e1245287c484d6649
|
refs/heads/master
| 2023-09-04T10:01:31.998311
| 2023-08-26T19:44:39
| 2023-08-26T19:44:39
| 152,967,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
class Solution(object):
    def findLeastNumOfUniqueInts(self, arr, k):
        """
        :type arr: List[int]
        :type k: int
        :rtype: int

        Remove exactly k elements from arr so that the number of distinct
        values remaining is minimized; greedily delete the rarest values
        first.

        Fixes two defects in the original submission:
        * ``dict.has_key`` is Python-2-only; use ``dict.get`` instead.
        * when k was large enough to delete every element the loop fell
          through and returned None -- the recorded WA case ([1], k=1
          expected 0).  Now returns 0 explicitly.
        """
        # Frequency of each distinct value (no imports needed).
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        # Delete cheapest (least frequent) values first.
        frequencies = sorted(counts.values())
        for removed, freq in enumerate(frequencies):
            k -= freq
            if k < 0:
                # This value (and all later, more frequent ones) survives.
                return len(frequencies) - removed
        # k removed every element: nothing distinct remains.
        return 0
"""
https://leetcode-cn.com/contest/weekly-contest-193/submissions/detail/78798394/
41 / 43 个通过测试用例
状态:解答错误
输入: [1]
1
输出: null
预期: 0
"""
|
[
"noreply@github.com"
] |
BIAOXYZ.noreply@github.com
|
dfd413d017821009e8b3b472eccf594be8be195c
|
6a7fc59a1fe8c7cd9593ae436b222f96538998e1
|
/predict/predictor.py
|
38afe69d4d4425feb707771c57228882a8481a4c
|
[] |
no_license
|
ductri/few_shot_learning
|
df57919b41aa1f299cadc3dff78e3ff5eb5735d2
|
ae24df58df5df031e517d221173e263d630d461c
|
refs/heads/master
| 2020-04-15T02:47:16.515192
| 2019-01-08T09:39:38
| 2019-01-08T09:39:38
| 164,324,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
import tensorflow as tf
import logging
import pickle
import numpy as np
class Predictor:
    """Serves predictions from a saved TensorFlow 1.x model over pairs of
    images (presumably a siamese similarity network -- TODO confirm).

    path_to_params: pickle file mapping logical names to graph tensor
        names ('tf_X', 'tf_predict', 'tf_predict_prob') and providing the
        'feed_dict_for_infer_func' callable used to build feed dicts.
    path_to_model: checkpoint prefix; '<path_to_model>.meta' must exist.
    """
    def __init__(self, path_to_params, path_to_model):
        # NOTE(review): pickle.load executes arbitrary code -- only load
        # params files from trusted sources.
        with open(path_to_params, 'rb') as input_file:
            self.params_dict = pickle.load(input_file)
        # Restore the saved graph into a private Graph/Session pair so this
        # predictor does not interfere with any default graph.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf.Session()
            with self.sess.as_default():
                saver = tf.train.import_meta_graph('{}.meta'.format(path_to_model))
                saver.restore(self.sess, path_to_model)
                logging.info('Restored saved model at %s', path_to_model)
    def predict(self, list_images_1, list_images_2):
        """Pair the two image lists element-wise and return hard predictions."""
        X = list(zip(list_images_1, list_images_2))
        return self._predict(X)
    def _predict(self, X):
        # Run the prediction tensor named in the params pickle.
        tf_predict = self.graph.get_tensor_by_name(self.params_dict['tf_predict'])
        feed_dict = self.__build_feed_dict(X)
        return np.squeeze(self.sess.run(tf_predict, feed_dict=feed_dict))
    def _predict_prob(self, X):
        # Same as _predict, but evaluates the probability tensor.
        tf_predict_prob = self.graph.get_tensor_by_name(self.params_dict['tf_predict_prob'])
        feed_dict = self.__build_feed_dict(X)
        return np.squeeze(self.sess.run(tf_predict_prob, feed_dict=feed_dict))
    def predict_prob(self, list_images_1, list_images_2):
        """Pair the two image lists element-wise and return probabilities."""
        X = list(zip(list_images_1, list_images_2))
        return self._predict_prob(X)
    def __build_feed_dict(self, X):
        # Feed-dict construction is delegated to the callable stored in the
        # params pickle, keyed on the input placeholder 'tf_X'.
        tf_X = self.graph.get_tensor_by_name(self.params_dict['tf_X'])
        feed_dict_func = self.params_dict['feed_dict_for_infer_func']
        feed_dict = feed_dict_func(tf_X, X)
        return feed_dict
|
[
"ductricse@gmail.com"
] |
ductricse@gmail.com
|
ccddd8581c9ee9fa1b2814939eb690869a35b89d
|
0308403d211ae8161d4ea5e283ccba9118ef11fb
|
/djangogirls/settings.py
|
03ead1b5dbfb437cd6d2e1a2f59d7533b31e47ca
|
[] |
no_license
|
mkone112/my-first-blog
|
5dfe6c1221018c8df83059e985cfc0ec1d099168
|
977bdd68dd07f52733d5026bc190239d01e8964d
|
refs/heads/master
| 2021-04-14T09:14:24.235240
| 2020-04-13T05:32:23
| 2020-04-13T05:32:23
| 249,221,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,261
|
py
|
"""
Django settings for djangogirls project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vk8-ro6&3n$&8wp5cmyk*h_z66gmu(00p8h0@mxkbof$tse-&#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangogirls.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangogirls.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"mkone112@gmail.com"
] |
mkone112@gmail.com
|
2b4dd33d55994aa1a2b85efdc4c041ee32c81d8d
|
7a1768da35f0746274b894ee170e732cc3490b19
|
/file_server_box_sync/aiofiles/__init__.py
|
049c6e4936449e1d819877e9276f9f07ac45ff89
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
dtrodger/file-server-box-syncer-public
|
5bf2e06b19a0ee6f382784ff2c9b0e7218175495
|
6296a66352df8c06ea691922a3728aeb487ab246
|
refs/heads/master
| 2023-04-16T03:47:29.161573
| 2021-04-30T03:04:34
| 2021-04-30T03:04:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
"""Utilities for asyncio-friendly file handling."""
from file_server_box_sync.aiofiles.threadpool import open
from file_server_box_sync.aiofiles import os
__version__ = "0.5.0.dev0"
__all__ = ['open', 'os']
|
[
"dtrodger20@gmail.com"
] |
dtrodger20@gmail.com
|
1013244a8cfb3d145b496f85e28b5a7569910cea
|
87a1e123c3ced5c54f15f3c2d1a9dbcaaf6c4be4
|
/post/admin.py
|
7156f502d5f3a2dab2287662bb759aed8d696db5
|
[] |
no_license
|
aaogoltcov/blogAPI
|
c73c26043a4fb4244eca617a0c3494cf98b3a2f5
|
232d6d2ffb412b1112be25bc4321c5011267ce45
|
refs/heads/master
| 2023-07-10T03:45:47.172277
| 2021-08-11T19:58:52
| 2021-08-11T19:58:52
| 391,724,564
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from post.models import Post, Comment
@admin.register(Post)
class PhotoAdmin(admin.ModelAdmin):
    # Default admin for the Post model.
    # NOTE(review): the class name says "Photo" but it is registered for
    # Post -- consider renaming to PostAdmin.
    pass
# Comments form an MPTT tree, so register them with the tree-aware admin.
admin.site.register(Comment, MPTTModelAdmin)
|
[
"aaogoltcov@mail.ru"
] |
aaogoltcov@mail.ru
|
bcd6616c0ea6410289fa174d2124d9cb3307c79a
|
fcf870abec4a3fe936668ed14afcded9c10e4aa3
|
/descnucleotide/ENAC.py
|
78208ab2ceb01406948e589745a25cd7baebd939
|
[] |
no_license
|
sirpan/iLearn
|
f8d81523720245cc1ab8368aeb609511fc93af5a
|
507aae17d9fea3d74a7c77984f1f1750eb734f53
|
refs/heads/master
| 2023-03-22T06:55:48.791894
| 2021-03-17T07:23:15
| 2021-03-17T07:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,672
|
py
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
import re, sys, os, platform
from collections import Counter
import argparse
pPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(pPath)
father_path = os.path.abspath(
os.path.dirname(pPath) + os.path.sep + ".") + r'\pubscripts' if platform.system() == 'Windows' else os.path.abspath(
os.path.dirname(pPath) + os.path.sep + ".") + r'/pubscripts'
sys.path.append(father_path)
import read_fasta_sequences
import save_file
import check_sequences
def ENAC(fastas, window=5, **kw):
if check_sequences.check_fasta_with_equal_length == False:
print('Error: for "ENAC" encoding, the input fasta sequences should be with equal length. \n\n')
return 0
if window < 1:
print('Error: the sliding window should be greater than zero' + '\n\n')
return 0
if check_sequences.get_min_sequence_length(fastas) < window:
print('Error: all the sequence length should be larger than the sliding window :' + str(window) + '\n\n')
return 0
AA = kw['order'] if kw['order'] != None else 'ACGT'
encodings = []
header = ['#', 'label']
for w in range(1, len(fastas[0][1]) - window + 2):
for aa in AA:
header.append('SW.' + str(w) + '.' + aa)
encodings.append(header)
for i in fastas:
name, sequence, label = i[0], i[1], i[2]
code = [name, label]
for j in range(len(sequence)):
if j < len(sequence) and j + window <= len(sequence):
count = Counter(sequence[j:j + window])
for key in count:
count[key] = count[key] / len(sequence[j:j + window])
for aa in AA:
code.append(count[aa])
encodings.append(code)
return encodings
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage="it's usage tip.",
description="Generating ENAC feature vector for nucleotide sequences")
parser.add_argument("--file", required=True, help="input fasta file")
parser.add_argument("--slwindow", type=int, default=5, help="the sliding window of ENAC descriptor")
parser.add_argument("--format", choices=['csv', 'tsv', 'svm', 'weka'], default='svm', help="the encoding type")
parser.add_argument("--out", help="the generated descriptor file")
args = parser.parse_args()
output = args.out if args.out != None else 'encoding.txt'
kw = {'order': 'ACGT'}
fastas = read_fasta_sequences.read_nucleotide_sequences(args.file)
encodings = ENAC(fastas, window=args.slwindow, **kw)
save_file.save_file(encodings, args.format, output)
|
[
"noreply@github.com"
] |
sirpan.noreply@github.com
|
28d773c01a93fe55f405b13365affa72ac82f085
|
c8633d2e72701f103d44e98960be8e1d0032bbcf
|
/Opuslog/settings/common.py
|
ddb10849521a25a0f423e660c0ed611b3e752e46
|
[] |
no_license
|
rushil02/opuslog
|
c3165b21bfd8a2283e19a4251f8041eb7371e443
|
dd34e80302551dee674b8ec700620b47e339c7a2
|
refs/heads/master
| 2021-04-27T04:45:04.165429
| 2016-05-20T18:19:10
| 2016-05-20T18:19:10
| 122,584,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,648
|
py
|
"""
Django settings for Opuslog project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import psycopg2
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^@j4m-ff2wcago%qkos@4$q(8#0jm6rkp#k3#hcq#9xdy2a9lr'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'cities_light',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'debug_toolbar',
'tinymce',
'rest_framework', # TODO: remove?
'user_custom',
'publication',
'write_up',
'engagement',
'essential',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Opuslog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Opuslog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'OpuslogDB',
'USER': 'MooPoint',
'PASSWORD': 'root',
'HOST': 'localhost',
'OPTIONS': {
'isolation_level': psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ, # TODO: change if needed
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_URL = '/media/'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
# Cities-light settings
CITIES_LIGHT_TRANSLATION_LANGUAGES = ['en', ]
CITIES_LIGHT_INCLUDE_COUNTRIES = ['IN', ]
# Email settings
EMAIL_HOST_USER = 'moopoint1402@gmail.com'
EMAIL_HOST_PASSWORD = 'sappy8086'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Domain name settings
WEBSITE_DOMAIN = 'www.opuslog.com'
# django-tinymce
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGE_SIZE': 10
}
|
[
"rushil0195@gmail.com"
] |
rushil0195@gmail.com
|
c3a7e9c3a0692d2974f6acff056f57ab42a7bf10
|
524591f2c4f760bc01c12fea3061833847a4ff9a
|
/arm/usr/lib/python2.7/dist-packages/rosdistro/source_file.py
|
d614c68bd8ca24a162d2b0dcb90e2b2449f5b3d9
|
[
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
Roboy/roboy_plexus
|
6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e
|
1f3039edd24c059459563cb81d194326fe824905
|
refs/heads/roboy3
| 2023-03-10T15:01:34.703853
| 2021-08-16T13:42:54
| 2021-08-16T13:42:54
| 101,666,005
| 2
| 4
|
BSD-3-Clause
| 2022-10-22T13:43:45
| 2017-08-28T16:53:52
|
C++
|
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .repository_specification import RepositorySpecification
class SourceFile(object):
_type = 'distribution'
def __init__(self, name, data):
self.name = name
assert 'type' in data and data['type'] != 'source', "Unable to handle 'source' format anymore, please update your 'source' file to the latest specification"
assert 'type' in data, "Expected file type is '%s'" % SourceFile._type
assert data['type'] == SourceFile._type, "Expected file type is '%s', not '%s'" % (SourceFile._type, data['type'])
assert 'version' in data, "Source file for '%s' lacks required version information" % self.name
assert int(data['version']) == 1, "Unable to handle '%s' format version '%d', please update rosdistro (e.g. on Ubuntu/Debian use: sudo apt-get update && sudo apt-get install --only-upgrade python-rosdistro)" % (SourceFile._type, int(data['version']))
self.version = int(data['version'])
self.repositories = {}
if 'repositories' in data:
for repo_name in sorted(data['repositories']):
repo_data = data['repositories'][repo_name]
if 'source' not in repo_data:
continue
repo_data = repo_data['source']
try:
assert 'version' in repo_data, "Repository '%s' lacks required version information" % repo_name
repo = RepositorySpecification(repo_name, repo_data)
except AssertionError as e:
e.args = [("Source file '%s': %s" % (self.name, a) if i == 0 else a) for i, a in enumerate(e.args)]
raise e
self.repositories[repo_name] = repo
|
[
"simon.trendel@tum.de"
] |
simon.trendel@tum.de
|
8cd578f9a8dc8366a1bea5ec6d51e91c6c3858fc
|
1609fe579811afe6f36ddca9d7c838ba5697131a
|
/radio/management/commands/set_default_access_tg.py
|
d05b1976d5381eb276ac621005d8e58c1a02900a
|
[
"MIT"
] |
permissive
|
ScanOC/trunk-player
|
1777347f47744538a33109a05b72d5f28d2674ef
|
95f37e5a55a8f2a8b2ff6e1bb0a2b1049bc97ac4
|
refs/heads/master
| 2023-08-03T14:52:08.834013
| 2023-05-01T18:05:33
| 2023-05-01T18:05:33
| 68,432,108
| 65
| 50
|
MIT
| 2023-08-02T01:34:28
| 2016-09-17T04:35:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
help = 'Helper for new TalkGroup Access'
def add_arguments(self, parser):
parser.add_argument('access_group_name')
def handle(self, *args, **options):
access_menu(self, options)
def access_menu(self, options):
try:
access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
except TalkGroupAccess.DoesNotExist:
self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
all_access_names = TalkGroupAccess.objects.all()
if all_access_names:
self.stdout.write('Current Talk Group Access lists in the database:')
for tg in all_access_names:
self.stdout.write(tg.name)
else:
self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
return
self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
ct=0
for tg in TalkGroupWithSystem.objects.filter(public=True):
access_gp.talkgroups.add(tg)
ct += 1
self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
|
[
"dreinhold@gmail.com"
] |
dreinhold@gmail.com
|
b1f2cc85a77d533e1c78ae8a0ad3cb6b46551d10
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/angle/third_party/glmark2/src/waflib/Tools/md5_tstamp.py
|
29522311b19f3c194ccdb4ae91512b38a2798964
|
[
"GPL-3.0-only",
"LicenseRef-scancode-x11-opengl",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,stat
from waflib import Utils,Build,Node
STRONGEST=True
Build.SAVED_ATTRS.append('hashes_md5_tstamp')
def h_file(self):
filename=self.abspath()
st=os.stat(filename)
cache=self.ctx.hashes_md5_tstamp
if filename in cache and cache[filename][0]==st.st_mtime:
return cache[filename][1]
global STRONGEST
if STRONGEST:
ret=Utils.h_file(filename)
else:
if stat.S_ISDIR(st[stat.ST_MODE]):
raise IOError('Not a file')
ret=Utils.md5(str((st.st_mtime,st.st_size)).encode()).digest()
cache[filename]=(st.st_mtime,ret)
return ret
h_file.__doc__=Node.Node.h_file.__doc__
Node.Node.h_file=h_file
|
[
"jengelh@inai.de"
] |
jengelh@inai.de
|
30de42b2f9e57942415e8b6c7f980872b821c582
|
de590d5af29b1f962853a0c4395aa95a8aa06b58
|
/0x1F-pascal_triangle/0-pascal_triangle.py
|
2359f75ddfbbe4082bf683c1413fe97087f9b162
|
[] |
no_license
|
MCavigli/holbertonschool-interview
|
ff31425e41a49bddd6d96a1bd41835830246f132
|
ab7c1e9b92eb113e8b28db82912e327736e4813f
|
refs/heads/master
| 2020-12-23T06:32:16.256851
| 2020-11-12T20:13:48
| 2020-11-12T20:13:48
| 237,068,237
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
#!/usr/bin/python3
"""
0-pascal_triangle
"""
def pascal_triangle(n):
"""
Returns a list of integers representing Pascal's triangle
"""
lst = [[] for i in range(0, n)]
for i in range(0, n):
for j in range(i + 1):
if (j < i):
if (j <= 0):
lst[i].append(1)
else:
lst[i].append(lst[i - 1][j] + lst[i - 1][j - 1])
elif (i == j):
lst[i].append(1)
return (lst)
|
[
"mcavigli@gmail.com"
] |
mcavigli@gmail.com
|
7114d514f06a2b225e3125b6227ae109bc916818
|
f98fc6d067d1b82b184deeed530ffec38f3d0e9b
|
/waliki/views.py
|
5a03797bd254bc69070d75eedb5591865b9890ca
|
[
"BSD-3-Clause"
] |
permissive
|
leliel12/waliki
|
93de82e6326018578f70112446388ea66f4f3ddc
|
610d7ffb652e5eaa73824f4d69c85701ca059609
|
refs/heads/master
| 2021-01-17T14:07:59.503425
| 2014-09-21T23:38:54
| 2014-09-21T23:38:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
import json
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .models import Page
from .forms import PageForm
from .signals import page_saved
from ._markups import get_all_markups
from . import settings
def home(request):
return detail(request, slug=settings.WALIKI_INDEX_SLUG)
def detail(request, slug):
slug = slug.strip('/')
try:
page = Page.objects.get(slug=slug)
except Page.DoesNotExist:
page = None
return render(request, 'waliki/detail.html', {'page': page, 'slug': slug})
def edit(request, slug):
slug = slug.strip('/')
page, _ = Page.objects.get_or_create(slug=slug)
data = request.POST if request.method == 'POST' else None
form = PageForm(data, instance=page)
if form.is_valid():
form.save()
page_saved.send(sender=edit,
page=page,
author=request.user,
message=form.cleaned_data["message"])
return redirect('waliki_detail', slug=page.slug)
cm_modes = [(m.name, m.codemirror_mode_name) for m in get_all_markups()]
cm_settings = settings.WALIKI_CODEMIRROR_SETTINGS
cm_settings.update({'mode': dict(cm_modes)[page.markup]})
return render(request, 'waliki/edit.html', {'page': page,
'form': form,
'slug': slug,
'cm_modes': cm_modes,
'cm_settings': json.dumps(cm_settings)})
def preview(request):
data = {}
if request.is_ajax() and request.method == "POST":
data['html'] = Page.preview(request.POST['markup'], request.POST['text'])
return HttpResponse(json.dumps(data), content_type="application/json")
def delete(request, slug):
return render(request, 'waliki/detail.html', {})
|
[
"gaitan@gmail.com"
] |
gaitan@gmail.com
|
6e3fecbf74e22a8df6f5db0f7c7c17cca2e1d3aa
|
96316aead0ad883d93eebc3fcd2dcbb30ad6636a
|
/authproj/authapp/views.py
|
bee4b8390958421253069d227518f90b41c00afa
|
[] |
no_license
|
Rashika233/django_auth_new
|
fe2e5da31e4d1e309946c333fa0ed2d7ec369a44
|
c3fbf2a7b042e33ff92ee894a63976ad97e77f7c
|
refs/heads/master
| 2022-12-04T10:06:10.294965
| 2020-08-27T15:54:52
| 2020-08-27T15:54:52
| 290,818,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
class HomePageView(TemplateView):
template_name='home.html'
|
[
"you@example.com"
] |
you@example.com
|
c7a14f2f19153b5ed04edaec1cd126007973aac9
|
46e29adaf067dea495da67b09bf1be60d26c45c9
|
/genetic_algorithm/chromosome.py
|
9de551cf850875c5f52ca27381599032ce386d95
|
[
"MIT"
] |
permissive
|
GeorgianBadita/Genetic-Programming-Function-Approximation
|
89800f96a7c457a6b7d5d4bb2deae915a7e7f3bc
|
5436074941d8888eb545e2cc4b332c6c832342f3
|
refs/heads/master
| 2020-09-12T20:58:09.383435
| 2020-06-13T10:22:27
| 2020-06-13T10:22:27
| 222,554,527
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
import random
import math
import numpy as np
import warnings
warnings.filterwarnings("error")
class Chromosome:
"""
Class for representing a chromosome
"""
def __init__(self, terminal_set, funct_set, depth, method='full'):
"""
Constructor for Chromosome class
@param: depth - tree depth
@param: method - method to generate the tree, default is full
@param: terminal_set - set of terminals
@param: funct_set - set of functions
"""
self.depth = depth
self.gen = []
self.terminal_set = terminal_set
self.func_set = funct_set
self.fitness = None
if method == 'grow':
self.grow()
elif method == 'full':
self.full()
def full(self, level = 0):
"""
Function to generate a tree in a full manner
Every node will have exactly two children
return: None
"""
if level == self.depth:
self.gen.append(random.choice(self.terminal_set))
else:
val = random.choice(self.func_set[1] + self.func_set[2])
if val in self.func_set[2]:
self.gen.append(random.choice(self.func_set[2]))
self.full(level + 1)
self.full(level + 1)
else:
self.gen.append(random.choice(self.func_set[1]))
self.full(level + 1)
def grow(self, level = 0):
"""
Function to generate a tree in a grow manner
Every node may be a terminal or a function
@return: None
"""
if level == self.depth:
self.gen.append(random.choice(self.terminal_set))
else:
if random.random() > 0.3:
val = random.choice(self.func_set[2] + self.func_set[1])
if val in self.func_set[2]:
self.gen.append(val)
self.grow(level + 1)
self.grow(level + 1)
else:
self.gen.append(val)
self.grow(level + 1)
else:
val = random.choice(self.terminal_set)
self.gen.append(val)
def eval(self, input, poz = 0):
"""
Function to evaluate the current chromosome with a given input
@param: input - function input (x0, x1... xn)
@poz: current_position in genotype
@return:
"""
if self.gen[poz] in self.terminal_set:
return input[int(self.gen[poz][1:])], poz
elif self.gen[poz] in self.func_set[2]:
poz_op = poz
left, poz = self.eval(input, poz + 1)
right, poz = self.eval(input, poz + 1)
if self.gen[poz_op] == '+':
return left + right, poz
elif self.gen[poz_op] == '-':
return left - right, poz
elif self.gen[poz_op] == '*':
return left * right, poz
elif self.gen[poz_op] == '^':
return left ** right, poz
elif self.gen[poz_op] == '/':
return left / right, poz
else:
poz_op = poz
left, poz = self.eval(input, poz + 1)
if self.gen[poz_op] == 'sin':
return np.sin(left), poz
elif self.gen[poz_op] == 'cos':
return np.cos(left), poz
elif self.gen[poz_op] == 'ln':
return np.log(left), poz
elif self.gen[poz_op] == 'sqrt':
return np.sqrt(left), poz
elif self.gen[poz_op] == 'tg':
return np.tan(left), poz
elif self.gen[poz_op] == 'ctg':
return 1/np.tan(left), poz
elif self.gen[poz_op] == 'e':
return np.exp(left), poz
elif self.gen[poz_op] == 'tanh':
return np.tanh(left), poz
elif self.gen[poz_op] == 'abs':
return abs(left), poz
def evaluate_arg(self, input):
"""
Function to evaluate the current genotype to a given input
@return: the value of self.gen evaluated at the given input
"""
return self.eval(input)[0]
def calculate_fitness(self, inputs, outputs):
"""
Function to claculate the fitness of a chromosome
@param inputs: inputs of the function we want to predict
@param outputs: outputs of the function we want to predict
@return: the chromosome's fitness (calculated based on MSE)
"""
diff = 0
for i in range(len(inputs)):
try:
diff += (self.eval(inputs[i])[0] - outputs[i][0])**2
except RuntimeWarning:
self.gen = []
if random.random() > 0.5:
self.grow()
else:
self.full()
self.calculate_fitness(inputs, outputs)
if len(inputs) == 0:
return 1e9
self.fitness = diff/(len(inputs))
return self.fitness
def __get_depth_aux(self, poz = 0):
"""
Function to get the depth of a chromosome
@return: chromosome's depth, last pos
"""
elem = self.gen[poz]
if elem in self.func_set[2]:
left, poz = self.__get_depth_aux(poz + 1)
right, poz = self.__get_depth_aux(poz)
return 1 + max(left, right), poz
elif elem in self.func_set[1]:
left, poz = self.__get_depth_aux(poz + 1)
return left + 1, poz
else:
return 1, poz + 1
def get_depth(self):
"""
Function to get the depth of a chromosome
@return: - chromosome's depth
"""
return self.__get_depth_aux()[0] - 1
|
[
"geo.badita@gmail.com"
] |
geo.badita@gmail.com
|
917e84a924d158a00d84a9381f093fbcf770d0e0
|
0b08158331fc9dfaead2ce5d67665facc36ff7f5
|
/openstack_dashboard/dashboards/admin/routers/tables.py
|
e914f08ad26039c3eb0136534521d644768d30d4
|
[] |
no_license
|
linyihan2013/horizon
|
7086a5aad773c5eb45762b5ad8465e7e8c52e0fc
|
42adcfdcaeaf3366b6b8664d7de485fb8c3c901e
|
refs/heads/master
| 2021-01-11T19:13:35.118072
| 2017-02-03T07:45:40
| 2017-02-03T07:45:40
| 79,337,770
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,003
|
py
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers import tables as r_tables
class DeleteRouter(r_tables.DeleteRouter):
redirect_url = "horizon:admin:routers:index"
class EditRouter(r_tables.EditRouter):
url = "horizon:admin:routers:update"
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, router_id):
router = api.neutron.router_get(request, router_id)
return router
class AdminRoutersFilterAction(r_tables.RoutersFilterAction):
name = 'filter_admin_routers'
filter_choices = r_tables.RoutersFilterAction.filter_choices + (
('project', _("Project ="), True),)
class RoutersTable(r_tables.RoutersTable):
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
name = tables.WrappingColumn("name",
verbose_name=_("Name"),
link="horizon:admin:routers:detail")
class Meta(object):
name = "routers"
verbose_name = _("Routers")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (DeleteRouter, AdminRoutersFilterAction)
row_actions = (EditRouter, DeleteRouter,)
Columns = ('tenant', 'name', 'status', 'distributed', 'ext_net')
|
[
"1032612468@qq.com"
] |
1032612468@qq.com
|
94dc893c82918cbe23b3f108f0902528e8950ded
|
22749c6a569661b2637233cc0aebdc1701033b26
|
/src/python/pants/backend/python/lint/flake8/subsystem_test.py
|
b1819103c29e14c0aef21b06727ffe544a568063
|
[
"Apache-2.0"
] |
permissive
|
akk5597/pants
|
2eceb226c39b8ef7f603dfa96684b7522e1a9065
|
7ad295f71d2990eebbbe9c778bbf70f7d9e66584
|
refs/heads/main
| 2023-08-27T02:40:54.753545
| 2021-11-10T03:42:18
| 2021-11-10T03:42:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,397
|
py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.python import target_types_rules
from pants.backend.python.goals.lockfile import PythonLockfileRequest
from pants.backend.python.lint.flake8 import skip_field
from pants.backend.python.lint.flake8.subsystem import Flake8LockfileSentinel
from pants.backend.python.lint.flake8.subsystem import rules as subsystem_rules
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.target_types import GenericTarget
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_setup_lockfile_interpreter_constraints() -> None:
rule_runner = RuleRunner(
rules=[
*subsystem_rules(),
*skip_field.rules(),
*target_types_rules.rules(),
QueryRule(PythonLockfileRequest, [Flake8LockfileSentinel]),
],
target_types=[PythonSourcesGeneratorTarget, GenericTarget],
)
global_constraint = "==3.9.*"
rule_runner.set_options(
["--flake8-lockfile=lockfile.txt"],
env={"PANTS_PYTHON_INTERPRETER_CONSTRAINTS": f"['{global_constraint}']"},
)
def assert_ics(build_file: str, expected: list[str]) -> None:
rule_runner.write_files({"project/BUILD": build_file, "project/f.py": ""})
lockfile_request = rule_runner.request(PythonLockfileRequest, [Flake8LockfileSentinel()])
assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected)
assert_ics("python_sources()", [global_constraint])
assert_ics("python_sources(interpreter_constraints=['==2.7.*'])", ["==2.7.*"])
assert_ics(
"python_sources(interpreter_constraints=['==2.7.*', '==3.5.*'])", ["==2.7.*", "==3.5.*"]
)
# If no Python targets in repo, fall back to global [python] constraints.
assert_ics("target()", [global_constraint])
# Ignore targets that are skipped.
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*'])
python_sources(name='b', interpreter_constraints=['==3.5.*'], skip_flake8=True)
"""
),
["==2.7.*"],
)
# If there are multiple distinct ICs in the repo, we OR them. This is because Flake8 will
# group into each distinct IC.
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*'])
python_sources(name='b', interpreter_constraints=['==3.5.*'])
"""
),
["==2.7.*", "==3.5.*"],
)
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*', '==3.5.*'])
python_sources(name='b', interpreter_constraints=['>=3.5'])
"""
),
["==2.7.*", "==3.5.*", ">=3.5"],
)
assert_ics(
dedent(
"""\
python_sources(name='a')
python_sources(name='b', interpreter_constraints=['==2.7.*'])
python_sources(name='c', interpreter_constraints=['>=3.6'])
"""
),
["==2.7.*", global_constraint, ">=3.6"],
)
|
[
"noreply@github.com"
] |
akk5597.noreply@github.com
|
b094561bb43bc796f0b5a3d8cab158ada80d2a5c
|
7bcb08ff9aa4c7aee78fe11a51375ad69e5c651d
|
/TestMethod.py
|
d58300564fc885c2f6f9582216346878d8782568
|
[] |
no_license
|
lc527756006/MachineLearningCW3
|
923441a548891c104800d78c18c476733296c2aa
|
1c21d4579f79f1154769996c34283cb3f921f304
|
refs/heads/master
| 2021-01-17T04:25:35.786315
| 2017-02-22T23:14:33
| 2017-02-22T23:14:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
# encoding=utf-8
import numpy as ny
import scipy.io as sio
a=[[1,2,3],[4,5,6]]
b=ny.array(a)
c=[0 for i in range(6)]
def load_data(i):
# 加载数据
clean_data = sio.loadmat("DecisionTreeData/cleandata_students.mat")
tdata = clean_data['x']
ldata = clean_data['y']
print len(tdata)
print len(ldata)
# 处理label
label_result=[]
tdata_result=[]
for ind,label_data in enumerate(ldata):
if ind % 10 ==i:
real_label=label_data[0]
temp_label=[0 for i in range(6)]
temp_label[real_label-1]=1
label_result.append(temp_label)
tdata_result.append(tdata[ind])
ny_tdata=ny.array(tdata_result)
ny_label=ny.array(label_result)
return ny_tdata,ny_label
d,e=load_data(1)
# print b
# print c
print len(d)
print len(e)
|
[
"wuzifan0817@gmail.com"
] |
wuzifan0817@gmail.com
|
37a6ab00fbd2140699d440e7c55e4f0516154c08
|
0c70dcec22a090e70b1f20613ea6e0a64fd9a037
|
/GPS卫星位置的计算/venv/Lib/site-packages/pandas/tests/frame/methods/test_value_counts.py
|
d6ae0421b79083a459cc483107b921a45be42b0c
|
[
"MIT"
] |
permissive
|
payiz-asj/Gis
|
82c1096d830878f62c7a0d5dfb6630d4e4744764
|
3d315fed93e2ab850b836ddfd7a67f5618969d10
|
refs/heads/main
| 2023-06-27T15:25:17.301154
| 2021-08-03T10:02:58
| 2021-08-03T10:02:58
| 392,269,853
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
import numpy as np
import pandas as pd
import pandas._testing as tm
def test_data_frame_value_counts_unsorted():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(sort=False)
expected = pd.Series(
data=[1, 2, 1],
index=pd.MultiIndex.from_arrays(
[(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_ascending():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(ascending=True)
expected = pd.Series(
data=[1, 1, 2],
index=pd.MultiIndex.from_arrays(
[(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_default():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays(
[(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_normalize():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(normalize=True)
expected = pd.Series(
data=[0.5, 0.25, 0.25],
index=pd.MultiIndex.from_arrays(
[(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_single_col_default():
df = pd.DataFrame({"num_legs": [2, 4, 4, 6]})
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays([[4, 6, 2]], names=["num_legs"]),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts()
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty_normalize():
    """Normalized value_counts() on an empty frame is an empty float64 Series."""
    df_no_cols = pd.DataFrame()
    result = df_no_cols.value_counts(normalize=True)
    expected = pd.Series([], dtype=np.float64)
    tm.assert_series_equal(result, expected)
|
[
"1778029840@qq.com"
] |
1778029840@qq.com
|
a1c657b1669648b68aab122a276b9a7e415bd902
|
99701affb7ae46c42c55484f3301d59f79294a10
|
/project/Examples/Examples/PP2E/Gui/ShellGui/Old/menugui0.py
|
afa1fa88bd67a89c2bde4e46238fb0d828516893
|
[] |
no_license
|
inteljack/EL6183-Digital-Signal-Processing-Lab-2015-Fall
|
1050b9e9bddb335bf42b7debf2abebe51dd9f9e0
|
0f650a97d8fbaa576142e5bb1745f136b027bc73
|
refs/heads/master
| 2021-01-21T21:48:21.326372
| 2016-04-06T20:05:19
| 2016-04-06T20:05:19
| 42,902,523
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
#!/usr/local/bin/python
from shellgui import * # type-specific shell interfaces
class TextPak1(ListMenuGui):
    """Demo shell GUI whose menu is declared as an ordered (label, handler) list."""

    def __init__(self):
        # Ordered menu: label -> bound handler method.
        self.myMenu = [('Pack', self.Pack),
                       ('Unpack', self.Unpack),
                       ('Mtool', self.Missing)]
        ListMenuGui.__init__(self)

    def forToolBar(self, label):
        # Only these two entries also appear on the toolbar.
        return label in ['Pack', 'Unpack']

    # Placeholder handlers; real dialogs not implemented in this demo.
    def Pack(self): print 'pack dialog...'
    def Unpack(self): print 'unpack dialog...'
    def Missing(self): print 'not yet implemented...'
class TextPak2(DictMenuGui):
    """Same demo as TextPak1 but the menu is declared as a label->handler dict."""

    def __init__(self):
        self.myMenu = {'Pack': self.Pack,
                       'Unpack': self.Unpack,
                       'Mtool': self.Missing}
        DictMenuGui.__init__(self)

    # Placeholder handlers; real dialogs not implemented in this demo.
    def Pack(self): print 'pack dialog...'
    def Unpack(self): print 'unpack dialog...'
    def Missing(self): print 'not yet implemented...'
if __name__ == '__main__':  # self-test code...
    from sys import argv
    # A 'list' command-line argument exercises the list-based menu GUI;
    # anything else (or no argument) exercises the dict-based one.
    if len(argv) > 1 and argv[1] == 'list':
        print 'list test'
        TextPak1().mainloop()
    else:
        print 'dict test'
        TextPak2().mainloop()
|
[
"inteljack2008@gmail.com"
] |
inteljack2008@gmail.com
|
e71b6313b4cdfca169fb2903c4a7e17fc6731107
|
1a9ec1308a2ade079e95782906e5f8af4ecb403e
|
/MySortAlgorithm/5_quicksort.py
|
89d85a25eed4a5731450f592a3eea1051cc15120
|
[] |
no_license
|
HeywoodKing/mytest
|
f0174f40bb60a7557ac361f566be36ac1b642366
|
ac19822dd28e3db60c56b57ba3dd50cb52736c6b
|
refs/heads/master
| 2022-12-12T15:47:28.141969
| 2020-07-24T00:46:01
| 2020-07-24T00:46:01
| 228,036,752
| 0
| 0
| null | 2022-12-08T06:18:26
| 2019-12-14T14:30:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
# -*- coding: utf-8 -*-
import time
"""
快速排序 (默认从小到大排序) 两边夹击
对于一个无序序列,取出第一个元素作为标志元素,剩下的元素定义两个头,一个低头,一个高头两个指针,从低指针开始和
标志元素比较,如果小于标志元素,继续查找,如果大于标志元素,停止低指针继续,从高指针一端开始查找,如果大于标志元素,
继续查找,如果小于标志元素,则将该元素和低指针的大于标志元素的交换位置,然后继续从低指针一端继续查找,知道两个指针相遇
,此位置插入标志元素,第一次遍历结束,在重复上述动作,以此类推
"""
# if alist[high] < mid_value:
# alist[low] = alist[high]
# low += 1
# elif :
# high -= 1
#
# if alist[low] < mid_value:
# low += 1
# elif alist[low] > mid_value:
# alist[high] = alist[low]
# high -= 1
# 最优时间复杂度O(nlog以2为底的n) 最坏时间复杂度O(n平方) 空间复杂度增加了 稳定性:不稳定
def quick_sort(alist, first=0, last=None):
    """In-place quicksort of ``alist[first:last + 1]``; returns ``alist``.

    Uses the classic two-pointer (Hoare-style) partition: the first element
    of the range is the pivot; ``low`` and ``high`` close in from both ends,
    moving out-of-place elements across the pivot position.

    ``last`` now defaults to ``None`` meaning "the last index" — the old
    default of 0 made the common ``quick_sort(lst)`` call a silent no-op.
    Explicit callers (``quick_sort(lst, 0, len(lst) - 1)``) are unaffected.

    Best case O(n log n); worst case O(n**2) on already-ordered input.
    Not a stable sort.
    """
    if last is None:
        last = len(alist) - 1
    # Ranges of one element (or empty/invalid ranges) are already sorted.
    if last <= first:
        return alist
    mid_value = alist[first]  # pivot; its slot is free for overwriting
    low = first
    high = last
    while low < high:
        # Shrink from the right until an element smaller than the pivot
        # is found, then drop it into the free slot on the left.
        while low < high and alist[high] >= mid_value:
            high -= 1
        alist[low] = alist[high]
        # Shrink from the left until an element >= pivot is found, then
        # drop it into the free slot on the right.
        while low < high and alist[low] < mid_value:
            low += 1
        alist[high] = alist[low]
    # Pointers met: this is the pivot's final position.
    alist[low] = mid_value
    quick_sort(alist, first, low - 1)   # sort left partition
    quick_sort(alist, low + 1, last)    # sort right partition
    return alist
if __name__ == "__main__":
ls = [33, 100, 4, 56, 39, 78, 12, 0, 20, 16]
start_time = time.time()
res = quick_sort(ls, 0, len(ls) - 1)
end_time = time.time()
print("耗时:%s" % (end_time - start_time))
print(res)
|
[
"opencoding@hotmail.com"
] |
opencoding@hotmail.com
|
39618f68fe15ead1d408d92e16a6c74de256073d
|
970565892722ac73038688343eddcd89f856e72b
|
/code/part2/mapper_ip_to_hits.py
|
b83031092aee84bcba8eb5f3a36e2ef278987ca5
|
[] |
no_license
|
dqian96/udacity-intro-to-hadoop-and-mapreduce
|
558d50881d6c20b8ef0c02f130d9b2b10857d547
|
0f5c58c60a47530ca8cac15b353f8a00b0efde0b
|
refs/heads/master
| 2021-06-08T20:54:33.486437
| 2016-11-26T00:20:01
| 2016-11-26T00:20:01
| 67,740,455
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
#!/usr/bin/python
# Maps page to number of hits.
# Map("log_chunk_2", "10.22.33...") -> [("1.1.1.1", 1),...,("1.1.1.1",2)]
import sys
import re
# Apache Common Log Format: host, ident, authuser, date, time, tz offset,
# request method/resource/protocol, status code and response size.
REGEX_COMMON_LOG_FORMAT = '(^[(\d{1,3}\.)]+\d{1,3}) (.+) (.+) \[(\d{2}/\w{3}/\d{4}):(\d{2}:\d{2}:\d{2}) -(\d{4})] "(\w+) (.+) ([\w\\/\.]+)[ ]*" (\d{3}) (-|\d+)$'


def parseLine(line):
    """Return the capture-group tuple for one Common Log Format line."""
    stripped = line.strip('\n')
    match = re.match(REGEX_COMMON_LOG_FORMAT, stripped)
    return match.groups()
# Hadoop-streaming mapper loop: emit one tab-separated (ip, 1) pair per
# log line read from standard input.
for line in sys.stdin:
    data = parseLine(line)
    print data[0] + '\t' + "1"
|
[
"training@localhost.localdomain"
] |
training@localhost.localdomain
|
7526d4b5dca68fde0208ee88c56ae5e6478b6ba5
|
85c426913d63773c4802a4a3c354df909030654b
|
/python/FA3/Integration/Group 1/Group 1/PyFood/functionality/searchfunctions.py
|
30ddd14491b0ecaac3a8af817284a9985908b5ee
|
[] |
no_license
|
SensehacK/playgrounds
|
17bf2a3133db6c0cafe185c4cc2c7b59862980aa
|
3decd550cdb6034db8b497051acaaec8221073aa
|
refs/heads/master
| 2023-05-11T20:05:31.680168
| 2023-04-30T00:01:58
| 2023-04-30T00:01:58
| 159,632,542
| 1
| 0
| null | 2023-03-05T11:34:34
| 2018-11-29T08:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
'''
Created on Mar 16, 2017
@author: gaurav.sainger
'''
# from database.ViewDB import search_restaurants
from validations import Validate
from exceptions import CustomException2
from functionality import filtersearch
def search_as_guest():
    """Prompt a guest for a city/area, list matching restaurants and let
    them filter the list or pick a restaurant.

    All interaction is via input()/print(); validation and lookups are
    delegated to Validate and filtersearch.
    """
    try:
        city=input("Enter your city:")
        area=input("Enter your area:")
        # city=city1.upper()
        # area=area1.upper()
        # Raises CustomException2.InvalidCategoryException on bad input.
        list_of_restaurants=Validate.validate_search_category(city,area)
        '''
        Print the details
        '''
        print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
        print("resturantname type of food likes dislikes rating")
        for select in list_of_restaurants:
            print(select.get_restaurantname()," ",select.get_type_of_food()," " , select.get_likes()," ",select.get_dislikes()," ",select.get_rating())
        print()
        choice=input("Do you want to filter or select restaurant?(F/S)")
        try:
            # Case-insensitive: both 'f' and 'F' trigger filtering.
            if (choice.upper()=="F"):
                filtersearch.Filter_search(city,area)
        except CustomException2.Invalidfilter as e:
            print(e)
        except Exception as e:
            print("Choose F or S")
            print(e)
        if (choice.upper()=="S"):
            # NOTE(review): the selected name is read but never used yet.
            resturant_name=input("Enter the resturant name:")
            print("")
    except CustomException2.InvalidCategoryException as e:
        print(e)
    except Exception as e:
        print(e)
    print()
def search_as_login(city,area):
    """List restaurants for ``city``/``area`` for a logged-in user and let
    them filter the list or pick a restaurant.

    Mirrors search_as_guest() but takes the location as arguments instead
    of prompting for it.
    """
    try:
        # Raises CustomException2.InvalidCategoryException on bad input.
        list_of_restaurants=Validate.validate_search_category(city,area)
        print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
        print("resturantname type of food likes dislikes rating")
        for select in list_of_restaurants:
            print(select.get_restaurantname()," ",select.get_type_of_food()," " , select.get_likes()," ",select.get_dislikes()," ",select.get_rating())
        print()
        choice=input("Do you want to filter or select restaurant?(F/S)")
        if (choice=="F"):
            # NOTE(review): Filter_search is called with (city, area) in
            # search_as_guest -- confirm the intended signature.
            filtersearch.Filter_search()
        if (choice=="S"):
            resturant_name=input("Enter the resturant name:")
            print("")
    # Bug fix: the bare name InvalidCategoryException was undefined here
    # (NameError); qualify it with its module as search_as_guest does.
    except CustomException2.InvalidCategoryException as e:
        print(e)
    except Exception as e:
        print("Sorry. Some system error occurred")
        print(e)
    print()
|
[
"kautilyasave@gmail.com"
] |
kautilyasave@gmail.com
|
2fab840e3c8c56b96c177cfe1d3791bad4b10365
|
3307766701d680af6d12a726a2d98df2cb1830e5
|
/jams/gcj/2011/1C/C.py
|
6a9956971f4be9063aaf7c2938656157a3b9c2e9
|
[] |
no_license
|
dpaneda/code
|
c1a54037a275fa7044eb5c2d6079f052dd968615
|
7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff
|
refs/heads/master
| 2023-01-07T18:41:00.816363
| 2022-12-30T09:24:22
| 2022-12-30T09:24:22
| 1,583,913
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
#!/usr/bin/python
# This works well and its clean but not good enought to fight agains large version :)
import sys
def Solve():
    """Read one test case from stdin and return the lowest frequency in
    [l, h] that divides or is divisible by every note, or "NO".

    Input format: a line "n l h" followed by a line of n note frequencies.
    """
    [n, l, h] = map(int, sys.stdin.readline().split())
    # Materialise the notes: they are iterated once per candidate index,
    # and a bare map object would be exhausted after the first pass.
    notes = list(map(int, sys.stdin.readline().split()))
    index = l
    while index < h + 1:
        freq = True
        for i in notes:
            # A valid frequency must divide or be divided by every note.
            if i % index != 0 and index % i != 0:
                freq = False
                break
        if freq == True:
            # Bug fix: the original returned the undefined name jeff_note.
            return index
        index += 1
    return "NO"
# Driver: first line is the number of test cases; emit one "Case #i: ..."
# line per case in Code Jam output format.
num = int(sys.stdin.readline())
for case in range(1, num + 1):
    print "Case #%d: %s" % (case, Solve())
|
[
"dpaneda@gmail.com"
] |
dpaneda@gmail.com
|
66c5ff6582c9ed9bc4af5f1ff227ef25c5f8b9ca
|
35d1b988b4ea391ed648f46ec00c13e9ab6cd9d8
|
/salt/modules/ssh.py
|
fd67e49953513f45222220088b57fe3bb4a07a5c
|
[
"Apache-2.0"
] |
permissive
|
atoponce/salt
|
2ceb247433e6d7b8401a9e3c501ea75e89f798b2
|
19764c5b78820f81cdb8f7f429feb67a8859b692
|
refs/heads/master
| 2021-01-18T07:24:27.763822
| 2011-10-04T03:46:11
| 2011-10-04T03:46:11
| 2,509,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,099
|
py
|
'''
Manage client ssh components
'''
import os
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
type. If the type is not found, return ssh-rsa, the ssh default.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
if rsa.count(enc):
return 'ssh-rsa'
elif dss.count(enc):
return 'ssh-dss'
else:
return 'ssh-rsa'
def _format_auth_line(
key,
enc,
comment,
options):
line = ''
if options:
line += '{0} '.format(','.join(options))
line += '{0} {1} {2}'.format(enc, key, comment)
return line
def _replace_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=[],
        config='.ssh/authorized_keys'):
    '''
    Replace an existing key in the user's authorized_keys file.

    Rewrites the whole file, substituting the freshly formatted line for
    the entry whose key material matches ``key``; all other lines
    (including comments and malformed lines) are preserved verbatim.
    '''
    auth_line = _format_auth_line(
        key,
        enc,
        comment,
        options)
    lines = []
    # __salt__ is the salt execution-module registry injected at runtime.
    uinfo = __salt__['user.info'](user)
    full = os.path.join(uinfo['home'], config)
    for line in open(full, 'r').readlines():
        if line.startswith('#'):
            # Commented Line
            lines.append(line)
            continue
        comps = line.split()
        if len(comps) < 2:
            # Not a valid line
            lines.append(line)
            continue
        # Key material is field 1, or field 2 when the line starts with
        # an options field rather than the key type.
        key_ind = 1
        if not comps[0].startswith('ssh-'):
            key_ind = 2
        if comps[key_ind] == key:
            lines.append(auth_line)
        else:
            lines.append(line)
    open(full, 'w+').writelines(lines)
def host_keys(keydir=None):
    '''
    Return the minion's host keys as a dict of {key name: file contents}.

    CLI Example:
    salt '*' ssh.host_keys
    '''
    # Set up the default keydir - needs to support sshd_config parsing in the
    # future
    if not keydir:
        if __grains__['Linux']:
            keydir = '/etc/ssh'
    keys = {}
    for fn_ in os.listdir(keydir):
        if fn_.startswith('ssh_host_'):
            # Derive the key name: third underscore-separated component,
            # plus the file extension (e.g. 'rsa' / 'rsa.pub').
            top = fn_.split('.')
            comps = fn_.split('_')
            kname = comps[2]
            if len(top) > 1:
                kname += '.{0}'.format(top[1])
            try:
                # Close the handle deterministically instead of leaking it,
                # and only swallow file-access errors (the old bare except
                # hid every failure, including programming errors).
                with open(os.path.join(keydir, fn_), 'r') as fp_:
                    keys[kname] = fp_.read()
            except (IOError, OSError):
                keys[kname] = ''
    return keys
def auth_keys(user, config='.ssh/authorized_keys'):
    '''
    Return the authorized keys for the specified user as a dict keyed by
    the key material, each value holding its enc type, comment and options.

    CLI Example:
    salt '*' ssh.auth_keys root
    '''
    ret = {}
    # __salt__ is the salt execution-module registry injected at runtime.
    uinfo = __salt__['user.info'](user)
    full = os.path.join(uinfo['home'], config)
    if not os.path.isfile(full):
        return {}
    for line in open(full, 'r').readlines():
        if line.startswith('#'):
            # Commented Line
            continue
        comps = line.split()
        if len(comps) < 2:
            # Not a valid line
            continue
        if not comps[0].startswith('ssh-'):
            # It has options, grab them
            options = comps[0].split(',')
        else:
            options = []
        # With options present, every field shifts right by one.
        if not options:
            enc = comps[0]
            key = comps[1]
            comment = ' '.join(comps[2:])
        else:
            enc = comps[1]
            key = comps[2]
            comment = ' '.join(comps[3:])
        ret[key] = {'enc': enc,
                    'comment': comment,
                    'options': options}
    return ret
def rm_auth_key(user, key, config='.ssh/authorized_keys'):
    '''
    Remove an authorized key from the specified user's authorized key file

    CLI Example:
    salt '*' ssh.rm_auth_key <user> <key>
    '''
    current = auth_keys(user, config)
    # dict.has_key() was removed in Python 3; membership test is equivalent.
    if key in current:
        # Remove the key
        uinfo = __salt__['user.info'](user)
        full = os.path.join(uinfo['home'], config)
        if not os.path.isfile(full):
            return 'User authorized keys file not present'
        lines = []
        # Rewrite the file, keeping every line except the matching key.
        with open(full, 'r') as fp_:
            for line in fp_.readlines():
                if line.startswith('#'):
                    # Commented Line
                    lines.append(line)
                    continue
                comps = line.split()
                if len(comps) < 2:
                    # Not a valid line
                    lines.append(line)
                    continue
                if not comps[0].startswith('ssh-'):
                    # It has options, grab them
                    options = comps[0].split(',')
                else:
                    options = []
                # With options present the key material shifts right by one.
                if not options:
                    pkey = comps[1]
                else:
                    pkey = comps[2]
                if pkey == key:
                    continue
                else:
                    lines.append(line)
        # Close the write handle deterministically (was leaked before).
        with open(full, 'w+') as fp_:
            fp_.writelines(lines)
        return 'Key removed'
    return 'Key not present'
def set_auth_key(
        user,
        key,
        enc='ssh-rsa',
        comment='',
        options=None,
        config='.ssh/authorized_keys'):
    '''
    Add a key to the authorized_keys file, replacing an existing entry for
    the same key when its enc type, comment or options differ.

    Returns 'new', 'replace' or 'no change'.

    CLI Example:
    salt '*' ssh.set_auth_key <user> <key> dsa '[]' .ssh/authorized_keys
    '''
    # Avoid the shared mutable-default-argument pitfall.
    if options is None:
        options = []
    enc = _refine_enc(enc)
    replace = False
    uinfo = __salt__['user.info'](user)
    current = auth_keys(user, config)
    # dict.has_key() was removed in Python 3; membership test is equivalent.
    if key in current:
        # Bug fix: compare against the stored entry for *this* key. The
        # previous code indexed current['options'] / current['enc'] /
        # current['comment'] directly, which raised KeyError.
        entry = current[key]
        if set(entry['options']) != set(options):
            replace = True
        if entry['enc'] != enc:
            replace = True
        if entry['comment'] != comment and comment:
            replace = True
        if replace:
            _replace_auth_key(
                user,
                key,
                enc,
                comment,
                options,
                config)
            return 'replace'
        return 'no change'
    # New key: append a freshly formatted line to the file.
    auth_line = _format_auth_line(
        key,
        enc,
        comment,
        options)
    with open(os.path.join(uinfo['home'], config), 'a+') as fp_:
        fp_.write('\n{0}'.format(auth_line))
    return 'new'
|
[
"thatch45@gmail.com"
] |
thatch45@gmail.com
|
dfc5a128780d692bffb8e8c4f0479adb3a7df600
|
d20184bce93f6d4da8e4e1b430a0d9828acc7459
|
/tensorflow_datasets/core/features/image_feature_test.py
|
053951347dc5ef224d171666ddbcf97f01f57fde
|
[
"Apache-2.0"
] |
permissive
|
abiraja2004/datasets
|
0bf6da2aaf86f332b54b1bcc0bfbd58eea4b2a7b
|
13b5287be1c400563d559384bd8e6d4d0244ba85
|
refs/heads/master
| 2020-04-05T12:17:02.018945
| 2018-11-09T11:51:19
| 2018-11-09T11:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,983
|
py
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.features.image_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
class ImageFeatureTest(tf.test.TestCase):
    """Round-trip and validation tests for features.Image encode/decode."""

    @tf.contrib.eager.run_test_in_graph_and_eager_modes()
    def test_encode_decode(self):
        """Images survive an encode/decode round-trip from arrays and files."""
        specs = features.SpecDict({
            'img': features.Image(),
            'img_shaped': features.Image(shape=(32, 64, 3)),
            'img_file': features.Image(),
        })
        img = np.random.randint(256, size=(128, 100, 3), dtype=np.uint8)
        img_shaped = np.random.randint(256, size=(32, 64, 3), dtype=np.uint8)
        img_file_path = os.path.join(os.path.dirname(__file__),
                                     '../test_data/6pixels.png')
        img_file_expected_content = [  # see tests_data/README.md
            [[0, 255, 0], [255, 0, 0], [255, 0, 255]],
            [[0, 0, 255], [255, 255, 0], [126, 127, 128]],
        ]
        decoded_sample = test_utils.features_encode_decode(specs, {
            'img': img,
            'img_shaped': img_shaped,
            'img_file': img_file_path,
        })
        self.assertAllEqual(decoded_sample['img'], img)
        self.assertAllEqual(decoded_sample['img_shaped'], img_shaped)
        self.assertAllEqual(decoded_sample['img_file'], img_file_expected_content)
        # 'img' shape can be dynamic
        img2 = np.random.randint(256, size=(64, 200, 3), dtype=np.uint8)
        decoded_sample = test_utils.features_encode_decode(specs, {
            'img': img2,
            'img_shaped': img_shaped,
            'img_file': img_file_path,
        })
        self.assertAllEqual(decoded_sample['img'], img2)
        # 'img_shaped' shape should be static
        img_shaped2 = np.random.randint(256, size=(31, 64, 3), dtype=np.uint8)
        with self.assertRaisesWithPredicateMatch(ValueError, 'are incompatible'):
            test_utils.features_encode_decode(specs, {
                'img': img2,
                'img_shaped': img_shaped2,
                'img_file': img_file_path,
            })

    @tf.contrib.eager.run_test_in_graph_and_eager_modes()
    def test_wrong_input(self):
        """Encoding rejects wrong dtype, rank and channel counts."""
        specs = features.SpecDict({
            'img': features.Image(),
        })
        # Correct shape/type should succeed
        test_utils.features_encode_decode(specs, {
            'img': np.random.randint(256, size=(128, 128, 3), dtype=np.uint8),
        })
        test_utils.features_encode_decode(specs, {
            'img': np.random.randint(256, size=(64, 64, 3), dtype=np.uint8),
        })
        # Invalid type
        with self.assertRaisesWithPredicateMatch(ValueError, 'should be uint8'):
            test_utils.features_encode_decode(specs, {
                'img': np.random.randint(256, size=(128, 128, 3), dtype=np.uint32),
            })
        # Invalid number of dimensions
        with self.assertRaisesWithPredicateMatch(ValueError,
                                                 'must have the same rank'):
            test_utils.features_encode_decode(specs, {
                'img': np.random.randint(256, size=(128, 128), dtype=np.uint8),
            })
        # Invalid number of channels
        with self.assertRaisesWithPredicateMatch(ValueError, 'are incompatible'):
            test_utils.features_encode_decode(specs, {
                'img': np.random.randint(256, size=(128, 128, 1), dtype=np.uint8),
            })
if __name__ == '__main__':
    # Allow running this test module directly.
    tf.test.main()
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
c648b82f5a7fde552e18e089edae3470823c484a
|
2a9e31f2dc50474cc33ea85a2b4dcb23b3a8b737
|
/raven/utils/stacks.py
|
57030b90849632bc56bab087fe41faf7aaf6a7fd
|
[
"BSD-3-Clause"
] |
permissive
|
mitsuhiko/raven
|
39ce3971adc90da5544251a58baac63fbf031c8a
|
3d9bd01e2881f58c7f9156c1fd243569547840ed
|
refs/heads/master
| 2023-06-08T17:51:57.663446
| 2011-10-12T01:47:04
| 2011-10-12T01:47:04
| 2,559,563
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,505
|
py
|
"""
raven.utils.stacks
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import inspect
import re
from raven.utils.encoding import transform
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
    """
    Returns context_lines before and after lineno from file.
    Returns (pre_context_lineno, pre_context, context_line, post_context).

    The source is preferentially fetched from the module loader (so it works
    for zip imports etc.), falling back to reading the file from disk.
    Returns (None, [], None, []) when the source cannot be obtained.
    """
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        try:
            source = loader.get_source(module_name)
        except ImportError:
            # Some loaders refuse names they did not import themselves,
            # e.g. pkgutil raising for '__main__':
            # Traceback (most recent call last):
            #   File "/Users/dcramer/Development/django-sentry/sentry/client/handlers.py", line 31, in emit
            #     get_client().create_from_record(record, request=request)
            #   File "/Users/dcramer/Development/django-sentry/sentry/client/base.py", line 325, in create_from_record
            #     data['__sentry__']['frames'] = varmap(shorten, get_stack_info(stack))
            #   File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 112, in get_stack_info
            #     pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
            #   File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 24, in get_lines_from_file
            #     source = loader.get_source(module_name)
            #   File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 287, in get_source
            #     fullname = self._fix_name(fullname)
            #   File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 262, in _fix_name
            #     "module %s" % (self.fullname, fullname))
            # ImportError: Loader for module cProfile cannot handle module __main__
            source = None
    if source is not None:
        source = source.splitlines()
    if source is None:
        # Fall back to reading the file directly from disk.
        try:
            f = open(filename)
            try:
                source = f.readlines()
            finally:
                f.close()
        except (OSError, IOError):
            pass
    if source is None:
        return None, [], None, []
    encoding = 'ascii'
    for line in source[:2]:
        # File coding may be specified. Match pattern from PEP-263
        # (http://www.python.org/dev/peps/pep-0263/)
        match = _coding_re.search(line)
        if match:
            encoding = match.group(1)
            break
    # NOTE: Python 2 only -- `unicode` does not exist on Python 3.
    source = [unicode(sline, encoding, 'replace') for sline in source]
    # Clamp the window at the top of the file; slicing clamps the bottom.
    lower_bound = max(0, lineno - context_lines)
    upper_bound = lineno + context_lines
    pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
    context_line = source[lineno].strip('\n')
    post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
    return lower_bound, pre_context, context_line, post_context
def get_culprit(frames, include_paths=None, exclude_paths=None):
    """Return the 'module.function' string best identifying the error origin.

    We iterate through each frame looking for a deterministic culprit.
    When one is found, we mark it as last "best guess" (best_guess) and then
    check it against ``exclude_paths``. If it isn't listed, then we use this
    option. If nothing is found, we use the "best guess" (falling back to
    the last frame's culprit).

    Defaults changed from mutable ``[]`` literals to ``None`` sentinels to
    avoid the shared-mutable-default pitfall; behavior is unchanged.
    """
    include_paths = include_paths or []
    exclude_paths = exclude_paths or []
    best_guess = None
    culprit = None
    for frame in frames:
        try:
            culprit = '.'.join([frame['module'], frame['function']])
        except KeyError:
            # Frame lacks module/function info; skip it.
            continue
        if any((culprit.startswith(k) for k in include_paths)):
            if not (best_guess and any((culprit.startswith(k) for k in exclude_paths))):
                best_guess = culprit
        elif best_guess:
            # First non-included frame after a best guess ends the search.
            break
    # Return either the best guess or the last frame's call
    return best_guess or culprit
def iter_traceback_frames(tb):
    """Yield each frame in a traceback chain, oldest first.

    Frames whose locals define ``__traceback_hide__`` are skipped -- a
    convention a few libraries use to hide their internal frames.
    """
    current = tb
    while current is not None:
        frame = current.tb_frame
        if not frame.f_locals.get('__traceback_hide__'):
            yield frame
        current = current.tb_next
def iter_stack_frames():
    """Yield the frame objects of the current call stack, skipping this
    generator's own frame."""
    stack = inspect.stack()[1:]
    for frame_info in stack:
        yield frame_info[0]
def get_stack_info(frames):
    """Build a list of serializable frame dicts (filename, module, function,
    line number, locals and surrounding source context) from frame objects.

    Frames whose source cannot be located are silently dropped.
    """
    results = []
    for frame in frames:
        # Support hidden frames
        if frame.f_locals.get('__traceback_hide__'):
            continue
        filename = frame.f_code.co_filename
        function = frame.f_code.co_name
        # f_lineno is 1-based; get_lines_from_file expects a 0-based index.
        lineno = frame.f_lineno - 1
        loader = frame.f_globals.get('__loader__')
        module_name = frame.f_globals.get('__name__')
        pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
        if pre_context_lineno is not None:
            results.append({
                'id': id(frame),
                'filename': filename,
                'module': module_name,
                'function': function,
                'lineno': lineno + 1,
                # TODO: vars need to be references
                'vars': transform(frame.f_locals.items()),
                'pre_context': pre_context,
                'context_line': context_line,
                'post_context': post_context,
                'pre_context_lineno': pre_context_lineno + 1,
            })
    return results
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
bc918e41ae9466ad97805350c85c83c738ad612c
|
b3e525a3c48800303019adac8f9079109c88004e
|
/iota/test/iris/testcases/swm/mode_switch.py
|
2163fc162470d0451dfa579e13746f89f9d2aa70
|
[] |
no_license
|
PsymonLi/sw
|
d272aee23bf66ebb1143785d6cb5e6fa3927f784
|
3890a88283a4a4b4f7488f0f79698445c814ee81
|
refs/heads/master
| 2022-12-16T21:04:26.379534
| 2020-08-27T07:57:22
| 2020-08-28T01:15:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
#! /usr/bin/python3
import paramiko
import logging
import sys
import time
import traceback
import iota.harness.api as api
from iota.harness.thirdparty.redfish import redfish_client
from iota.test.utils.redfish.ping import ping
from iota.test.utils.redfish.ncsi_ops import check_set_ncsi
from iota.test.utils.redfish.nic_ops import get_nic_mode
from iota.test.utils.redfish.ncsi_ops import set_ncsi_mode
from iota.test.utils.redfish.dedicated_mode_ops import set_dedicated_mode
from iota.test.utils.redfish.common import get_redfish_obj
def Setup(tc):
    """Pick the first Naples node, record its ILO/CIMC addresses on ``tc``
    and detect whether management is currently in dedicated or NC-SI mode."""
    naples_nodes = api.GetNaplesNodes()
    if len(naples_nodes) == 0:
        api.Logger.error("No naples node found")
        return api.types.status.ERROR
    tc.test_node = naples_nodes[0]
    tc.node_name = tc.test_node.Name()
    cimc_info = tc.test_node.GetCimcInfo()
    if not cimc_info:
        api.Logger.error("CimcInfo is None, exiting")
        return api.types.status.ERROR
    tc.ilo_ip = cimc_info.GetIp()
    tc.ilo_ncsi_ip = cimc_info.GetNcsiIp()
    tc.cimc_info = cimc_info
    # Whichever ILO address answers ping reveals the current NIC mode.
    if ping(tc.ilo_ip, 3) == api.types.status.SUCCESS:
        tc.initial_mode = "dedicated"
    elif ping(tc.ilo_ncsi_ip, 3) == api.types.status.SUCCESS:
        tc.initial_mode = "ncsi"
    else:
        api.Logger.error('ILO unreachable')
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Repeatedly flip the NIC management mode between dedicated and NC-SI,
    verifying after each switch that the ILO answers on the new address."""
    max_pings = int(getattr(tc.args, "max_pings", 60))
    mode = tc.initial_mode
    try:
        for _i in range(tc.iterators.count):
            RF = get_redfish_obj(tc.cimc_info, mode=mode)
            obs_mode = get_nic_mode(RF)
            api.Logger.info("Iteration %d: curr_mode %s" % (_i, obs_mode))
            # Sanity-check that the device agrees with our bookkeeping.
            if mode != obs_mode:
                raise RuntimeError("Expected NIC mode %s, observed %s" % (mode, obs_mode))
            next_mode = "dedicated" if mode == "ncsi" else "ncsi"
            if next_mode == "ncsi":
                ret = set_ncsi_mode(RF, mode="dhcp")
            else:
                ret = set_dedicated_mode(RF, mode="dhcp")
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Mode switch from %s -> %s failed" %(mode, next_mode))
                return api.types.status.FAILURE
            api.Logger.info("Switched mode to %s" % (next_mode))
            # Give the device a moment to re-plumb the management port.
            time.sleep(5)
            if ret == api.types.status.SUCCESS:
                # The ILO must now respond on the address for the new mode.
                curr_ilo_ip = tc.ilo_ip if next_mode == "dedicated" else tc.ilo_ncsi_ip
                ret = ping(curr_ilo_ip, max_pings)
                if ret != api.types.status.SUCCESS:
                    RF.logout()
                    raise RuntimeError('Unable to ping ILO, Port Switch fail from'
                                       ' %s -> %s' % (mode, next_mode))
                api.Logger.info("Mode switch from %s -> %s successful" % (mode, next_mode))
            else:
                raise RuntimeError('Mode switch config failed')
            mode = next_mode
    except:
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Verify(tc):
    # All verification happens inline in Trigger(); nothing further to check.
    return api.types.status.SUCCESS
def Teardown(tc):
    # No resources to release; the mode flips are part of the test itself.
    return api.types.status.SUCCESS
|
[
"noreply@github.com"
] |
PsymonLi.noreply@github.com
|
60cd5aa547cedf3c4161dfe2689d0b25e1907946
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/nm8zFcqcQ9Rzu45Fm_8.py
|
6f23ec4930e6f866f2cf4c94d2f9d47afbdc03e8
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
def bridge_shuffle(lst1, lst2):
    """Interleave ``lst1`` and ``lst2`` element by element; once the shorter
    list runs out, append the remainder of the longer one unchanged."""
    shorter = min(len(lst1), len(lst2))
    shuffled = []
    for left, right in zip(lst1, lst2):
        shuffled.append(left)
        shuffled.append(right)
    # At most one of these tail slices is non-empty.
    return shuffled + lst1[shorter:] + lst2[shorter:]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
b9760bc5d28254cf3a26ee63a1685d0d8051e09d
|
00f1d6f2998e404deb42c43b3b03c8089b263568
|
/CCV4-adv/week4/lanhuajian/loss.py
|
05ed3934181726fa4c5cd98aa89fed59991ec0d3
|
[] |
no_license
|
xiashiwendao/kaikeba
|
430bdfa4a0b99700fcbe76fce9118791917ade13
|
986f72e3f4eb9d210ebf6b46d9cace6c24353865
|
refs/heads/master
| 2022-12-26T15:41:06.842476
| 2020-10-10T06:20:47
| 2020-10-10T06:20:47
| 293,436,997
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import torch
from torch import nn
# 二分类focal loss
# Binary-classification focal loss
class BCEFocalLoss(nn.Module):
    """Focal loss for binary classification on predicted probabilities.

    ``pt`` is the predicted probability of the positive class and ``target``
    the 0/1 label; hard examples are up-weighted by the ``gamma`` exponent
    and the classes balanced by ``alpha``.
    """

    def __init__(self, gamma=2, alpha=0.25, reduction="elementwise_mean"):
        super().__init__()
        self.gamma = gamma          # focusing exponent for hard examples
        self.alpha = alpha          # weight of the positive class
        self.reduction = reduction  # "elementwise_mean", "sum" or per-element

    def forward(self, pt, target):
        gamma = self.gamma
        alpha = self.alpha
        positive_term = -alpha * (1 - pt) ** gamma * target * torch.log(pt)
        negative_term = -(1 - alpha) * pt ** gamma * (1 - target) * torch.log(1 - pt)
        loss = positive_term + negative_term
        if self.reduction == "elementwise_mean":
            return torch.mean(loss)
        if self.reduction == "sum":
            return torch.sum(loss)
        return loss
|
[
"xiashiwendao@163.com"
] |
xiashiwendao@163.com
|
3189a9bddb5e0869d4c2a3fd33df5916788f3680
|
b403c7fe56209472855dff451f0b6283d5471008
|
/Supplemental_Material/PythonProjects/13. PYGAME/Pygames/pygame1.py
|
cb258cdafed5b51868ddb1c79da280eb4fc9e0e4
|
[] |
no_license
|
Sandbox4KidsTM/Python_Basics
|
842bde52796896e913fdb5cc349034c52092555f
|
68c95547ec1567958fc8069e6a4bb119e436211a
|
refs/heads/master
| 2020-03-23T01:06:29.363196
| 2018-08-10T04:32:58
| 2018-08-10T04:32:58
| 140,901,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# sentdex YouTube tutorial series: first pygame program with a basic window.
import pygame  # import pygame module

pygame.init()  # initialise all pygame subsystems before any other call

# Resolution of the game window; set_mode() expects one (width, height)
# tuple, not two separate parameters.
gameDisplay = pygame.display.set_mode((800, 600))
pygame.display.set_caption('A racing game')  # title of the game window
clock = pygame.time.Clock()  # clock used to cap the frame rate

crashed = False
while not crashed:
    for event in pygame.event.get():  # all events queued since the last frame
        if event.type == pygame.QUIT:
            crashed = True  # break out of the while loop
        print(event)  # print all the events in the console
    pygame.display.update()  # redraw just the current window
    # alternate is pygame.display.flip() - updates the entire surface (all windows)
    clock.tick(60)  # cap the loop at 60 frames per second

pygame.quit()  # end pygame
quit()
|
[
"mitchslabrenz@gmail.com"
] |
mitchslabrenz@gmail.com
|
a317c7c7a2454ad9c8e3d5914e0c7b875ea45f70
|
13a4df75e81ee4330a197340a300ec0755247a93
|
/aKindOfQuestion/5.arithmeticRobot.py
|
1b2226136d037872506976d20fe89461be50c385
|
[] |
no_license
|
ltfafei/py_Leetcode_study
|
d22955380bf9f134bc9cb215fea73ec4f9ea94cf
|
0fd1bca56a621001cf9093f60941c4bfed4c79a5
|
refs/heads/master
| 2023-07-13T18:15:59.098314
| 2021-08-30T15:11:17
| 2021-08-30T15:11:17
| 363,597,757
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
#!/usr/bin/python3
#-*- coding: UTF-8 -*-
#Author: afei00123
'''
5、算术机器人
现在利用编程设计一个简单的算术机器人,只需输入两个数字,在输入一个指令,机器人可计算并将结果返回。
输入的数值可以是任意整数,假设为X和Y,计算指令为A和B。A指令进行如下运算:
X*2+Y
Y指令表示如下运算:
Y*2+X
如果输入指令:AB,则先进行A指令运算,再进行B指令运算。最终将运算结果返回。
'''
def arithRobot(x, y, s):
    """Run the command string ``s`` on operands ``x`` and ``y``.

    Each 'A' command adds ``2*x + y`` to the running total; any other
    command character adds ``2*y + x``. The accumulated sum is returned.
    """
    total = 0
    for command in s:
        total += 2 * x + y if command == "A" else 2 * y + x
    return total
print(arithRobot(3, 9, "AB"))
'''
Output result:
36
'''
|
[
"m18479685120@163.com"
] |
m18479685120@163.com
|
2b6a99e9ca658e6f1cc574e56292dd95a63adb96
|
3b98ee18977177e10b57e6162a03204e3774d3b8
|
/Kirk_Byers_python_for_network_engineers/week2/week2exercise1.py
|
7ed21f9bba6e7484fd3a5b58b9e2f55c26fba027
|
[] |
no_license
|
mattmiller87/practice
|
0a3d1cae1283abb683dfab0af86e6c569a6104e1
|
9655a8020038e0f6dfe8df842867debac0fcb1e3
|
refs/heads/master
| 2022-06-23T23:47:50.350379
| 2022-06-14T13:30:51
| 2022-06-14T13:38:56
| 51,970,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
'''
I. Create a script that does the following
A. Prompts the user to input an IP network.
Notes:
1. For simplicity the network is always assumed to be a /24 network
2. The network can be entered in using one of the following three formats 10.88.17.0, 10.88.17., or 10.88.17
B. Regardless of which of the three formats is used, store this IP network as a list in the following format ['10', '88', '17', '0'] i.e. a list with four octets (all strings), the last octet is always zero (a string).
Hint: There is a way you can accomplish this using a list slice.
Hint2: If you can't solve this question with a list slice, then try using the below if statement (note, we haven't discussed if/else conditionals yet; we will talk about them in the next class).
if len(octets) == 3:
octets.append('0')
elif len(octets) == 4:
octets[3] = '0'
C. Print the IP network out to the screen.
D. Print a table that looks like the following (columns 20 characters in width):
NETWORK_NUMBER FIRST_OCTET_BINARY FIRST_OCTET_HEX
88.19.107.0 0b1011000 0x58
'''
ip_addr = raw_input("Please enter an IP address: ")
print "The IP Address you entered is: " + ip_addr
ip_parts = ip_addr.split(".")
ip_parts[3] = "0"
ip_fix = ".".join(ip_parts)
first_octet_binary = bin(int(ip_parts[0]))
first_octet_hex = hex(int(ip_parts[0]))
print "%-20s %-20s %-20s" % ("NETWORK_NUMBER","FIRST_OCTET_BINARY","FIRST_OCTET_HEX")
print "%-20s %-20s %-20s" % (ip_fix,first_octet_binary,first_octet_hex)
|
[
"mattmiller87@gmail.com"
] |
mattmiller87@gmail.com
|
42a67c3fb9dd598afba939fca288531e7c478b96
|
3fd0bd83099a2405c53c8f1b3f8235d7ebb46fbd
|
/tests/unit/specs/test_timer.py
|
5f77be1c6fa44cc2d266e027f6b838b25f532ba4
|
[
"MIT"
] |
permissive
|
diogoaurelio/floto
|
59df848e0844314c999ad0833fec3671ea942cb9
|
5d1dedf91ea427db1f0fd9d7005fc3fa36e17cb6
|
refs/heads/master
| 2021-01-22T16:38:20.500203
| 2016-03-22T07:31:04
| 2016-03-22T07:31:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
import pytest
import floto.specs
class TestTimer(object):
def test_init(self):
t = floto.specs.Timer(id_='my_timer', requires=['t2'], delay_in_seconds=600)
assert t.id_ == 'my_timer'
assert t.requires == ['t2']
assert t.delay_in_seconds == 600
|
[
"amatthies@babbel.com"
] |
amatthies@babbel.com
|
8726a52380b69f66f75746665c55e2a15f298458
|
e3da1286e01dec6a2b096e3d2f6620218468a391
|
/.scripts/submit.py
|
9dab5a0c353face38047e702bdfbca6c71c92620
|
[] |
no_license
|
SquidneySquush/cse-20289
|
e42d6314d3a05a75b9ba4461002647019a215211
|
2e377f65e5dfde07fce277dbb30e04531e84d59c
|
refs/heads/master
| 2022-11-17T07:31:22.956873
| 2020-05-01T20:42:05
| 2020-05-01T20:42:05
| 264,722,989
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
#!/usr/bin/env python3
import glob
import json
import os
import sys
import requests
import yaml
# Globals
ASSIGNMENTS = {}
DREDD_QUIZ_URL = 'https://dredd.h4x0r.space/quiz/cse-20289-sp20/'
DREDD_QUIZ_MAX = 4.0
# Utilities
def add_assignment(assignment, path=None):
if path is None:
path = assignment
if assignment.startswith('reading'):
ASSIGNMENTS[assignment] = path
def print_results(results):
for key, value in sorted(results):
try:
print('{:>8} {:.2f}'.format(key.title(), value))
except ValueError:
print('{:>8} {}'.format(key.title(), value))
# Submit Functions
def submit_quiz(assignment, path):
answers = None
for mod_load, ext in ((json.load, 'json'), (yaml.safe_load, 'yaml')):
try:
answers = mod_load(open(os.path.join(path, 'answers.' + ext)))
except IOError as e:
pass
except Exception as e:
print('Unable to parse answers.{}: {}'.format(ext, e))
return 1
if answers is None:
print('No quiz found (answers.{json,yaml})')
return 1
print('Submitting {} quiz ...'.format(assignment))
response = requests.post(DREDD_QUIZ_URL + assignment, data=json.dumps(answers))
print_results(response.json().items())
return 0 if response.json().get('score', 0) >= DREDD_QUIZ_MAX else 1
# Main Execution
# Add GitLab branch
try:
add_assignment(os.environ['CI_BUILD_REF_NAME'])
except KeyError:
pass
# Add local git branch
try:
add_assignment(os.popen('git symbolic-ref -q --short HEAD 2> /dev/null').read().strip())
except OSError:
pass
# Add current directory
add_assignment(os.path.basename(os.path.abspath(os.curdir)), os.curdir)
# For each assignment, submit quiz answers and program code
if not ASSIGNMENTS:
print('Nothing to submit!')
sys.exit(1)
exit_code = 0
for assignment, path in sorted(ASSIGNMENTS.items()):
print('Submitting {} assignment ...'.format(assignment))
if 'reading' in assignment:
exit_code += submit_quiz(assignment, path)
sys.exit(exit_code)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
[
"pbui@nd.edu"
] |
pbui@nd.edu
|
97971326bba15e94906f8e029d5d8d64d6ef433d
|
40b0284e928451f4c26c4aa75180bd3f36251e0d
|
/tw2/asyncresources/requirejs/__init__.py
|
df843783696a1cac737c908606160d0dc98e55ed
|
[
"MIT"
] |
permissive
|
amol-/tw2.asyncresources
|
2a8f318ff985b4ce2024bfa54707d389250601a2
|
298f6cc17ccc668f577b19322f2148b290c751b0
|
refs/heads/master
| 2021-01-18T14:38:42.437050
| 2014-05-12T09:23:43
| 2014-05-12T09:23:43
| 19,569,415
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
import json
from tw2.core import params as tw2pm
from tw2.core.resources import JSLink as TW2JSLink
from tw2.core.resources import CSSLink as TW2CSSLink
from tw2.core.resources import JSSource as TW2JSSource
class JSLink(TW2JSLink):
inline_engine_name = 'genshi'
location = 'headbottom'
template = '<script type="text/javascript">require(["$w.link"])</script>'
class CSSLink(TW2CSSLink):
inline_engine_name = 'genshi'
location = 'headbottom'
template = '''<script type="text/javascript">
(function(url) {
var link = document.createElement("link");
link.type = "text/css";
link.rel = "stylesheet";
link.href = url;
document.getElementsByTagName("head")[0].appendChild(link);
})("$w.link");
</script>'''
class JSSource(TW2JSSource):
dependencies = tw2pm.Param('resources required by this script')
inline_engine_name = 'genshi'
template = '<script type="text/javascript">require(${w.js_dependencies}, function() { $w.src })</script>'
def prepare(self):
super(TW2JSSource, self).prepare()
self.js_dependencies = json.dumps(self.dependencies.js_links)
|
[
"alessandro.molina@axant.it"
] |
alessandro.molina@axant.it
|
0d28b008daa72d90879d795f9f62367f1da6e748
|
46357db3b1c1af699384d9cba1ffbc3c732117ad
|
/selenium_basics/07_webdriver_api/scrolldown.py
|
439b92428042e802fb07494fe1578a9eabdd4038
|
[] |
permissive
|
khanhdodang/automation-training-python
|
28fbd70ca4bc84e47cf17d1e4702513863e38c44
|
b16143961cee869c7555b449e2a05abeae2dc3b5
|
refs/heads/master
| 2023-07-11T05:21:34.495851
| 2021-08-18T01:29:37
| 2021-08-18T01:29:37
| 285,208,030
| 0
| 8
|
MIT
| 2020-09-29T07:01:15
| 2020-08-05T07:01:46
|
Python
|
UTF-8
|
Python
| false
| false
| 420
|
py
|
# import webdriver
from selenium import webdriver
# import time
import time
# create webdriver object
driver = webdriver.Chrome()
driver.maximize_window()
# get geeksforgeeks.org
driver.get("https://www.geeksforgeeks.org/")
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(3)
driver.execute_script("window.scrollTo(document.body.scrollHeight, 0);")
time.sleep(3)
driver.close()
|
[
"khanhdo.pmp@gmail.com"
] |
khanhdo.pmp@gmail.com
|
6379119d77fb8304861e6bb9226ab3b25fb3c157
|
7f44a279773732b183963349d146a8dd9a195b88
|
/home/migrations/0022_auto_20200929_1731.py
|
7074f056ac8ef3685cb16efd7ff2ff64f2433ad2
|
[] |
no_license
|
pseudobabble/cms-boilerplate
|
f138060e2f25721191289eb261185136ae9cf6bd
|
3923a8ebe1541118c5551b0996557f241943831f
|
refs/heads/master
| 2022-12-28T01:30:49.554898
| 2020-10-15T15:23:10
| 2020-10-15T15:23:10
| 283,308,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
# Generated by Django 3.1.1 on 2020-09-29 17:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0021_auto_20200929_1725'),
]
operations = [
migrations.RemoveField(
model_name='artwork',
name='artwork_soundbite',
),
migrations.AddField(
model_name='soundbite',
name='artwork_soundbite',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='artwork_soundbites', to='home.artwork'),
),
]
|
[
"harryjohnson500@gmail.com"
] |
harryjohnson500@gmail.com
|
f3c793e84385a281b30ca565ac2bf8ddad2993fb
|
0561900bf01598e6c453131952fe4f62b2f9c6e9
|
/week2/List Problems/sorted_names.py
|
9c0c756f42014ab1a873fe9e1df0d2fc72f6a37c
|
[] |
no_license
|
Rositsazz/HackBulgaria-Programming0
|
497f40eefa373b024389c58e7d83aff3ffc547ac
|
0590d7430ff0aadfb737593a04d3ab1eb894f8d3
|
refs/heads/master
| 2016-09-06T09:09:19.416648
| 2015-08-11T10:02:49
| 2015-08-11T10:02:49
| 30,868,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
n = input("Enter number:")
n = int(n)
count = 0
array = []
while count < n :
m = input("enter word: " )
array = array + [m]
count+=1
print("Sorted names are:")
b = sorted(array)
i = 0
while i<len(b):
print(b[i])
i+=1
|
[
"ross_zz@mail.bg"
] |
ross_zz@mail.bg
|
ed51a887cbf8c93ace619126a900925c2cbcdf54
|
78dc15505e17cef3e49410bbadc1bb4812cdbbad
|
/foiamachine/settings.default.py
|
5f1cdffa10b06c608591e67abe7ad5e408b15d45
|
[
"MIT"
] |
permissive
|
jgillum/foiamachine
|
4a7e4ef9fec681341c014dbe7c98bbce79debb4e
|
26d3b02870227696cdaab639c39d47b2a7a42ae5
|
refs/heads/master
| 2020-06-29T11:19:46.232758
| 2019-08-19T02:27:45
| 2019-08-19T02:27:45
| 200,519,075
| 3
| 1
| null | 2019-08-04T16:57:27
| 2019-08-04T16:57:27
| null |
UTF-8
|
Python
| false
| false
| 5,386
|
py
|
# Django settings for foiamachine project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'foiamachine', # Or path to database file if using sqlite3.
'USER': 'foiamchine', # Not used with sqlite3.
'PASSWORD': 'xxxxxxxx', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'xxxxxxx'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'foiamachine.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'foiamachine.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"jgillum@gmail.com"
] |
jgillum@gmail.com
|
ec596f7b294b1180b3f3de8871017bdaef112020
|
9e3398214a4f12bf31e367b0b7a4810c24526a1f
|
/core/migrations/0001_initial.py
|
04fb0789ed8aa582491be59e056c957fe38e7cee
|
[] |
no_license
|
tricelex/django_ecommerce
|
7987381a240f75015d77293c16594f37cbc0c29a
|
7a9f9cb427214ea9134b59461e580616d2fc0ce5
|
refs/heads/master
| 2023-02-25T21:56:55.303780
| 2021-02-01T22:58:23
| 2021-02-01T22:58:23
| 327,143,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
# Generated by Django 3.1.5 on 2021-01-06 16:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=100)),
("price", models.FloatField()),
],
),
migrations.CreateModel(
name="OrderItem",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"item",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="core.item"
),
),
],
),
migrations.CreateModel(
name="Order",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("start_date", models.DateTimeField(auto_now_add=True)),
("ordered_date", models.DateTimeField()),
("ordered", models.BooleanField(default=False)),
("items", models.ManyToManyField(to="core.OrderItem")),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
|
[
"tricelex@gmail.com"
] |
tricelex@gmail.com
|
450ff22a3190894301c71874dd1f607b45be7e88
|
846119600b78936f9004bc3254aa8e6141e7c198
|
/setup.py
|
185ef6bf3b43da989871790301371722c9282f01
|
[] |
no_license
|
slaskis/django-cachalot
|
e63ef8e5cf54988a0001a29f15c907e004355169
|
2b8f001a212d7774e450a0b2bf82e7d56e8b1e61
|
refs/heads/master
| 2020-03-25T20:22:46.572296
| 2018-07-27T18:27:56
| 2018-07-27T18:27:56
| 144,128,413
| 0
| 0
| null | 2018-08-09T09:08:40
| 2018-08-09T09:08:39
| null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from cachalot import __version__
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CURRENT_PATH, 'requirements.txt')) as f:
required = f.read().splitlines()
setup(
name='django-cachalot',
version=__version__,
author='Bertrand Bordage',
author_email='bordage.bertrand@gmail.com',
url='https://github.com/noripyt/django-cachalot',
description='Caches your Django ORM queries '
'and automatically invalidates them.',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
license='BSD',
packages=find_packages(),
install_requires=required,
include_package_data=True,
zip_safe=False,
)
|
[
"bordage.bertrand@gmail.com"
] |
bordage.bertrand@gmail.com
|
73d8b6289524e9c1ef1e6c5e5eb989f59323f556
|
293b5687000daed5e5b610c912ca3cd41bb1c942
|
/python/EightTeV/GMSB_Lambda180_CTau10000_8TeV_pythia6_cff.py
|
71f458d3fd359a5f19e4169824815a29afdf3328
|
[] |
no_license
|
amagitte/genproductions
|
a5ea90b74c025569b8088ca8c53459cc27e3a415
|
b081e07a1f7a3843a07ca56ef98706925233b0e1
|
refs/heads/master
| 2020-05-29T11:07:03.440946
| 2015-05-21T13:17:30
| 2015-05-21T13:17:30
| 36,014,150
| 0
| 0
| null | 2015-05-21T13:24:28
| 2015-05-21T13:24:28
| null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythia6HepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythia6PylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1.0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythia6UESettingsBlock,
processParameters = cms.vstring(
'MSEL=39 ! All SUSY processes',
'IMSS(1) = 11 ! Spectrum from external SLHA file',
'IMSS(11) = 1 ! keeps gravitino mass from being overwritten',
'IMSS(21) = 33 ! LUN number for SLHA File (must be 33)',
'IMSS(22) = 33 ! Read-in SLHA decay table',
'PARJ(71)=10000. ! for which ctau 10000 mm',
'RMSS(21) = 0 ! The gravitino mass'),
parameterSets = cms.vstring('pythia6UESettings',
'processParameters',
'SLHAParameters'),
SLHAParameters = cms.vstring('SLHAFILE = Configuration/Generator/data/GMSB_Lambda180_CTau10000_pythia6.slha')
#SLHAParameters = cms.vstring('SLHAFILE = GMSB-8-TeV/8-TeV-Samples/python/GMSB_Lambda180_CTau10000_pythia6.slha')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: Configuration/GenProduction/python/EightTeV/GMSB_Lambda180_CTau10000_8TeV_pythia6_cff.py,v $'),
annotation = cms.untracked.string('GMSB Lambda=180TeV and ctau=10000 at 8 TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"dnash@cern.ch"
] |
dnash@cern.ch
|
64870bd723301749859cd4d0d6225eb595790355
|
3dcf4fe1f47ff5682b9f033dc82e58cc223ce9b4
|
/features/steps/placeholder.py
|
8af196f8af6d0e73518af2f3f09e70f1c5dfd6ed
|
[
"MIT"
] |
permissive
|
roeman/python-pptx
|
549e3e563acf527b7259795361f220c363458a3d
|
1052d94f4397730655c719d61cf858344f1da18d
|
refs/heads/master
| 2021-01-21T16:27:44.922761
| 2014-02-11T09:25:03
| 2014-02-11T09:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
# encoding: utf-8
"""
Gherkin step implementations for placeholder-related features.
"""
from __future__ import absolute_import
from behave import given, when, then
from hamcrest import assert_that, equal_to, is_
from pptx import Presentation
from .helpers import saved_pptx_path, test_text
# given ===================================================
@given('a bullet body placeholder')
def given_a_bullet_body_placeholder(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[1]
context.sld = context.prs.slides.add_slide(slidelayout)
context.body = context.sld.shapes.placeholders[1]
# when ====================================================
@when('I indent the first paragraph')
def step_when_indent_first_paragraph(context):
context.body.textframe.paragraphs[0].level = 1
@when("I set the title text of the slide")
def step_when_set_slide_title_text(context):
context.sld.shapes.title.text = test_text
# then ====================================================
@then('the paragraph is indented')
def then_paragraph_is_indented(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
body = sld.shapes.placeholders[1]
p = body.textframe.paragraphs[0]
assert_that(p.level, is_(equal_to(1)))
@then('the text appears in the title placeholder')
def step_then_text_appears_in_title_placeholder(context):
prs = Presentation(saved_pptx_path)
title_shape = prs.slides[0].shapes.title
title_text = title_shape.textframe.paragraphs[0].runs[0].text
assert_that(title_text, is_(equal_to(test_text)))
|
[
"scanny@cisco.com"
] |
scanny@cisco.com
|
527a127d9ca3c34c0049745107072334589feedc
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/restore_to_existing_instance_request.py
|
eaeaf16d79bd59ce2328a16f998669aec7efff70
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 3,600
|
py
|
# coding: utf-8
import pprint
import re
import six
class RestoreToExistingInstanceRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'body': 'RestoreToExistingInstanceRequestBody'
}
attribute_map = {
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, x_language=None, body=None):
"""RestoreToExistingInstanceRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this RestoreToExistingInstanceRequest.
语言
:return: The x_language of this RestoreToExistingInstanceRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this RestoreToExistingInstanceRequest.
语言
:param x_language: The x_language of this RestoreToExistingInstanceRequest.
:type: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this RestoreToExistingInstanceRequest.
:return: The body of this RestoreToExistingInstanceRequest.
:rtype: RestoreToExistingInstanceRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RestoreToExistingInstanceRequest.
:param body: The body of this RestoreToExistingInstanceRequest.
:type: RestoreToExistingInstanceRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RestoreToExistingInstanceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
d3cc070bb3e6485c7ad08ccdbd9116c405ed25d9
|
55538c0680f270a007c64e51cf0ecd417b0929c2
|
/Dinosaur_bot.py
|
652448bc2b3d671c38a14bff4fa65f6fa041fbe4
|
[] |
no_license
|
alankrit03/PROJECTS
|
62fff0a10ec5056ce8a907ca9bd7699f3fff1567
|
b0242c680f4be0efd7d78d21aea72ce38e7356e6
|
refs/heads/master
| 2021-05-18T17:01:07.543489
| 2020-04-11T16:34:51
| 2020-04-11T16:34:51
| 251,328,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 10:19:25 2019
@author: Alankrit Agarwal
"""
from PIL import ImageGrab,ImageOps
import pyautogui
import time
from numpy import array
class coOrdinates():
replay_button = (480,460)
dinosuar = (189,439)
def restartGame():
pyautogui.click(coOrdinates.replay_button)
def jump():
pyautogui.keyDown('space')
time.sleep(0.05)
# print('Jump')
pyautogui.keyUp('space')
def imageGrab():
box = (coOrdinates.dinosuar[0]+60,coOrdinates.dinosuar[1],coOrdinates.dinosuar[0]+100,coOrdinates.dinosuar[1]+30)
image = ImageGrab.grab(box)
grayImage = ImageOps.grayscale(image)
a=array(grayImage.getcolors())
print(a.sum())
return(a.sum())
def main():
restartGame()
while(True):
if(imageGrab()!=1447):
print(imageGrab())
jump()
time.sleep(0.1)
main()
|
[
"alankritagarwal9@gmail.com"
] |
alankritagarwal9@gmail.com
|
266a16f03b660b672e82b8bea1f406c944d034fd
|
7c63a96fad4257f4959ffeba0868059fc96566fb
|
/py/m_lutz-programming_python-4_ed/code/ch_07/24-common_appearance_2/main.pyw
|
f3d0a30542bc7f07fcacfe426e4a921bf50a1d3f
|
[
"MIT"
] |
permissive
|
ordinary-developer/education
|
b426148f5690f48e0ed4853adfc3740bd038b72c
|
526e5cf86f90eab68063bb7c75744226f2c54b8d
|
refs/heads/master
| 2023-08-31T14:42:37.237690
| 2023-08-30T18:15:18
| 2023-08-30T18:15:18
| 91,232,306
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
pyw
|
from tkinter import *
from user_preferences import bcolor, bfont, bsize
class ThemedButton(Button):
def __init__(self, parent = None, **configs):
Button.__init__(self, parent, **configs)
self.pack()
self.config(bg = bcolor, font = (bfont, bsize))
def onSpam():
print('Spam')
def onEggs():
print('Eggs')
class MyButton(ThemedButton):
def __init__(self, parent = None, **configs):
ThemedButton.__init__(self, parent, **configs)
self.config(text = 'subclass')
if __name__ == '__main__':
ThemedButton(text = 'spam', command = onSpam)
ThemedButton(text = 'eggs', command = onEggs)
MyButton(command = onSpam)
mainloop()
|
[
"merely.ordinary.developer@gmail.com"
] |
merely.ordinary.developer@gmail.com
|
466ec891360a35814ebe15a57bbeee8f7f2adbbc
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02726/s686642226.py
|
b305cc29a6765e5b6e23b79cc90a1b232f86414b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import itertools
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
N,X,Y = list(map(int , input().split()))
combi = list(itertools.combinations(range(1,N+1),2))
ansList = [0 for _ in range(N)]
for i,j in combi:
kyori = min(j-i, abs(j-Y) + abs(X-i) + 1)
ansList[kyori] += 1
# print(i,j,ansList)
for ans in ansList[1:]:
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
453c5be294b70396ec8c7f1b5601a359121582f9
|
63c8b9227a6b3178d918769042ecb060acc557be
|
/cwf/gateway/fabfile.py
|
331ddde55e0aab0eb396b96748426e32c4ecab5e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
snwfdhmp/magma
|
7c4898db68d2668fd39ed25f73bb9a2bc5959066
|
8b3ff20a2717337a83c8ef531fa773a851d2e54d
|
refs/heads/master
| 2020-12-06T09:06:25.806497
| 2020-01-07T18:27:09
| 2020-01-07T18:28:51
| 232,418,366
| 1
| 0
|
NOASSERTION
| 2020-01-07T21:12:28
| 2020-01-07T21:12:27
| null |
UTF-8
|
Python
| true
| false
| 7,077
|
py
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import sys
from distutils.util import strtobool
from fabric.api import cd, env, execute, lcd, local, put, run, settings, sudo
sys.path.append('../../orc8r')
from tools.fab.hosts import ansible_setup, vagrant_setup
CWAG_ROOT = "$MAGMA_ROOT/cwf/gateway"
CWAG_INTEG_ROOT = "$MAGMA_ROOT/cwf/gateway/integ_tests"
LTE_AGW_ROOT = "../../lte/gateway"
CWAG_TEST_IP = "192.168.128.2"
TRF_SERVER_IP = "192.168.129.42"
TRF_SERVER_SUBNET = "192.168.129.0"
CWAG_BR_NAME = "cwag_br0"
CWAG_TEST_BR_NAME = "cwag_test_br0"
def integ_test(gateway_host=None, test_host=None, trf_host=None,
destroy_vm="False"):
"""
Run the integration tests. This defaults to running on local vagrant
machines, but can also be pointed to an arbitrary host (e.g. amazon) by
passing "address:port" as arguments
gateway_host: The ssh address string of the machine to run the gateway
services on. Formatted as "host:port". If not specified, defaults to
the `cwag` vagrant box.
test_host: The ssh address string of the machine to run the tests on
on. Formatted as "host:port". If not specified, defaults to the
`cwag_test` vagrant box.
trf_host: The ssh address string of the machine to run the tests on
on. Formatted as "host:port". If not specified, defaults to the
`magma_trfserver` vagrant box.
"""
destroy_vm = bool(strtobool(destroy_vm))
# Setup the gateway: use the provided gateway if given, else default to the
# vagrant machine
if not gateway_host:
vagrant_setup("cwag", destroy_vm)
else:
ansible_setup(gateway_host, "cwag", "cwag_dev.yml")
execute(_run_unit_tests)
execute(_set_cwag_configs)
cwag_host_to_mac = execute(_get_br_mac, CWAG_BR_NAME)
host = env.hosts[0]
cwag_br_mac = cwag_host_to_mac[host]
# Transfer built images from local machine to CWAG host
if gateway_host:
execute(_transfer_docker_images)
else:
execute(_stop_gateway)
execute(_build_gateway)
execute(_run_gateway)
# Setup the trfserver: use the provided trfserver if given, else default to the
# vagrant machine
with lcd(LTE_AGW_ROOT):
if not trf_host:
vagrant_setup("magma_trfserver", destroy_vm)
else:
ansible_setup(trf_host, "trfserver", "magma_trfserver.yml")
execute(_start_trfserver)
# Run the tests: use the provided test machine if given, else default to
# the vagrant machine
if not test_host:
vagrant_setup("cwag_test", destroy_vm)
else:
ansible_setup(test_host, "cwag_test", "cwag_test.yml")
cwag_test_host_to_mac = execute(_get_br_mac, CWAG_TEST_BR_NAME)
host = env.hosts[0]
cwag_test_br_mac = cwag_test_host_to_mac[host]
execute(_set_cwag_test_configs)
execute(_start_ue_simulator)
# Get back to the gateway vm to setup static arp
if not gateway_host:
vagrant_setup("cwag", destroy_vm)
else:
ansible_setup(gateway_host, "cwag", "cwag_dev.yml")
execute(_set_cwag_networking, cwag_test_br_mac)
# Start tests
if not test_host:
vagrant_setup("cwag_test", destroy_vm)
else:
ansible_setup(test_host, "cwag_test", "cwag_test.yml")
execute(_set_cwag_test_networking, cwag_br_mac)
execute(_run_integ_tests, test_host, trf_host)
def _transfer_docker_images():
output = local("docker images cwf_*", capture=True)
for line in output.splitlines():
if not line.startswith('cwf'):
continue
line = line.rstrip("\n")
image = line.split(" ")[0]
local("docker save -o /tmp/%s.tar %s" % (image, image))
put("/tmp/%s.tar" % image, "%s.tar" % image)
local("rm -f /tmp/%s.tar" % image)
run('docker load -i %s.tar' % image)
def _set_cwag_configs():
""" Set the necessary config overrides """
with cd(CWAG_INTEG_ROOT):
sudo('mkdir -p /var/opt/magma')
sudo('mkdir -p /var/opt/magma/configs')
sudo('cp gateway.mconfig /var/opt/magma/configs/')
def _set_cwag_networking(mac):
sudo('arp -s %s %s' % (CWAG_TEST_IP, mac))
def _get_br_mac(bridge_name):
mac = run("cat /sys/class/net/%s/address" % bridge_name)
return mac
def _set_cwag_test_configs():
""" Set the necessary test configs """
sudo('mkdir -p /etc/magma')
# Create empty uesim config
sudo('touch /etc/magma/uesim.yml')
def _set_cwag_test_networking(mac):
    """Route the trfserver subnet through the test bridge and pin its ARP entry."""
    # Don't error if route already exists
    with settings(warn_only=True):
        sudo('ip route add %s/24 dev %s proto static scope link' %
             (TRF_SERVER_SUBNET, CWAG_TEST_BR_NAME))
    sudo('arp -s %s %s' % (TRF_SERVER_IP, mac))
def _stop_gateway():
    """ Stop the gateway docker images """
    # Same three compose files as build/run so 'down' sees the same services.
    with cd(CWAG_ROOT + '/docker'):
        sudo(' docker-compose'
             ' -f docker-compose.yml'
             ' -f docker-compose.override.yml'
             ' -f docker-compose.integ-test.yml'
             ' down')
def _build_gateway():
    """ Builds the gateway docker images """
    with cd(CWAG_ROOT + '/docker'):
        sudo(' docker-compose'
             ' -f docker-compose.yml'
             ' -f docker-compose.override.yml'
             ' -f docker-compose.integ-test.yml'
             ' build --parallel')
def _run_gateway():
    """ Runs the gateway's docker images """
    # Detached (-d) so the fab task can continue to the next setup step.
    with cd(CWAG_ROOT + '/docker'):
        sudo(' docker-compose'
             ' -f docker-compose.yml'
             ' -f docker-compose.override.yml'
             ' -f docker-compose.integ-test.yml'
             ' up -d ')
def _start_ue_simulator():
    """ Starts the UE Sim Service """
    # Runs in a detached tmux session so it survives the fabric connection.
    with cd(CWAG_ROOT + '/services/uesim/uesim'):
        run('tmux new -d \'go run main.go\'')
def _start_trfserver():
    """Start the traffic-gen (iperf3) server, detached, bound to TRF_SERVER_IP."""
    run('nohup iperf3 -s -B %s > /dev/null &' % TRF_SERVER_IP, pty=False)
def _run_unit_tests():
    """ Run the cwag unit tests """
    with cd(CWAG_ROOT):
        run('make test')
def _run_integ_tests(test_host, trf_host):
    """Run the integration tests and exit with the make target's return code.

    test_host / trf_host are only inspected to decide whether cleanup is
    needed (cleanup only happens for the all-local vagrant setup).
    """
    with cd(CWAG_INTEG_ROOT):
        # warn_only so a failing test suite doesn't abort before cleanup.
        result = run('make integ_test', warn_only=True)
        if not test_host and not trf_host:
            # Clean up only for now when running locally
            execute(_clean_up)
        if result.return_code == 0:
            print("Integration Test Passed!")
            sys.exit(0)
        else:
            print("Integration Test returned ", result.return_code)
            sys.exit(result.return_code)
def _clean_up():
    """Kill the UE simulator and the iperf3 traffic server after a local run."""
    # already in cwag test vm at this point
    # Kill uesim service
    run('pkill go', warn_only=True)
    with lcd(LTE_AGW_ROOT):
        # Switch to the trfserver VM to stop iperf3 there.
        vagrant_setup("magma_trfserver", False)
        run('pkill iperf3 > /dev/null &', pty=False, warn_only=True)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
1a87536b6ff79a033ee52fef26498ed28d0bd950
|
2820f09d142c00a9d6b24373f6c5ef502c4df75f
|
/Robot_Arm/gui/owirobot.py
|
92d18c07b41d054b7e17175849d7829273ee8d64
|
[] |
no_license
|
IdeasGarage/code_club
|
53a08bb5c122ea69194fd2360e490b7fa2aa3d03
|
f638963b4bc0dd2c0d3473fb2c2a5bb3f7a3d185
|
refs/heads/master
| 2020-04-12T03:12:47.481116
| 2017-01-24T17:18:04
| 2017-01-24T17:18:04
| 43,813,443
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,455
|
py
|
"""
Copyright 2013 Steve Battle
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import usb.core
import usb.util
import time
from Tkinter import *
# Tk root window for the GUI.
root = Tk()

# Locate the OWI robot arm on the USB bus (vendor 0x1267, product 0).
dev = usb.core.find(idVendor=0x1267, idProduct=0);
if dev is None:
    raise ValueError('Device not found')

# Motor commands are two-byte bitmasks [byte0, byte1]; a third byte (the
# LED state) is appended before each control transfer (see command/command2).
STOP = [0,0]
GRIP_CLOSE = [1,0]
GRIP_OPEN = [2,0]
WRIST_UP = [4,0]
WRIST_DOWN = [8,0]
ELBOW_UP = [16,0]
ELBOW_DOWN = [32,0]
SHOULDER_UP = [64,0]
SHOULDER_DOWN= [128,0]
BASE_COUNTERCLOCKWISE = [0,1]
BASE_CLOCKWISE = [0,2]
# Seconds a timed command (gripper buttons) keeps the motor running.
duration = 1
def command2(cmd, light):
    """Send a motor command plus LED state to the arm; does not auto-stop."""
    c = list(cmd)
    c.append(light)
    # Vendor control transfer (bmRequestType=0x40, bRequest=6, wValue=0x100).
    dev.ctrl_transfer(0x40,6,0x100,0,c,1000)
def command(cmd, light, duration):
    """Run a motor command for *duration* seconds, then send STOP.

    Delegates both transfers to command2 instead of duplicating the
    USB control-transfer logic inline (the original repeated it twice).
    """
    command2(cmd, light)
    time.sleep(duration)
    command2(STOP, light)
class Window:
    """Tkinter control panel for the OWI arm.

    Buttons run the gripper for a fixed duration; each Scale (-1/0/+1)
    drives one joint continuously and zeroes the other scales so only
    one joint moves at a time.
    """
    def __init__(self, parent):
        frame = Frame(parent)
        frame.pack()
        Label(frame, text="OWI Robot").grid(row=0, column=1)
        Label(frame, text="Gripper:").grid(row=1)
        self.btn1 = Button(frame, text="close", command=self.grip_close)
        self.btn1.grid(row=1, column=1)
        self.btn2 = Button(frame, text="open", command=self.grip_open)
        self.btn2.grid(row=1, column=2)
        Label(frame, text="Wrist:").grid(row=2)
        self.wristScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.wrist)
        self.wristScale.grid(row=2, column=1)
        Label(frame, text="Elbow:").grid(row=3)
        self.elbowScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.elbow)
        self.elbowScale.grid(row=3, column=1)
        Label(frame, text="Shoulder:").grid(row=4)
        self.shoulderScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.shoulder)
        self.shoulderScale.grid(row=4, column=1)
        Label(frame, text="Base:").grid(row=5)
        self.baseScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.base)
        self.baseScale.grid(row=5, column=1)
        # lightVar mirrors the LED checkbox; its value is appended to every command.
        self.lightVar = IntVar()
        self.cb = Checkbutton(frame, text="Light", command=self.light, variable=self.lightVar, offvalue=0, onvalue=1)
        self.cb.grid(row=6)
    def grip_close(self):
        # Timed command: runs for `duration` seconds, then stops.
        command(GRIP_CLOSE,self.lightVar.get(),duration)
    def grip_open(self):
        command(GRIP_OPEN,self.lightVar.get(),duration)
    def light(self):
        # Send STOP with the new LED state so only the light toggles.
        command2(STOP,self.lightVar.get())
    def wrist(self, value):
        # Zero the other joint scales so motors don't fight each other.
        self.elbowScale.set(0)
        self.shoulderScale.set(0)
        self.baseScale.set(0)
        if int(value)<0:
            command2(WRIST_DOWN,self.lightVar.get())
        elif int(value)>0:
            command2(WRIST_UP, self.lightVar.get())
        else:
            command2(STOP, self.lightVar.get())
    def elbow(self, value):
        self.wristScale.set(0)
        self.shoulderScale.set(0)
        self.baseScale.set(0)
        if int(value)<0:
            command2(ELBOW_DOWN,self.lightVar.get())
        elif int(value)>0:
            command2(ELBOW_UP, self.lightVar.get())
        else:
            command2(STOP, self.lightVar.get())
    def shoulder(self, value):
        self.wristScale.set(0)
        self.elbowScale.set(0)
        self.baseScale.set(0)
        if int(value)<0:
            command2(SHOULDER_DOWN,self.lightVar.get())
        elif int(value)>0:
            command2(SHOULDER_UP, self.lightVar.get())
        else:
            command2(STOP, self.lightVar.get())
    def base(self, value):
        self.wristScale.set(0)
        self.elbowScale.set(0)
        self.shoulderScale.set(0)
        if int(value)>0:
            command2(BASE_COUNTERCLOCKWISE,self.lightVar.get())
        elif int(value)<0:
            command2(BASE_CLOCKWISE, self.lightVar.get())
        else:
            command2(STOP, self.lightVar.get())
# Build the GUI and hand control to the Tk event loop.
window = Window(root)
root.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
09ae244ad99bc60aff2c70c0e43895b0cff2b546
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/AllenInstitute_dipde/dipde-master/dipde/internals/connection.py
|
fff691738825b6ce043b658098641e70501c748e
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,377
|
py
|
"""Module containing Connection class, connections between source and target populations."""
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from dipde.internals import utilities as util
from dipde.internals import ConnectionDistribution
class Connection(object):
    '''Class that connects dipde source population to dipde target population.

    The Connection class handles all of the details of propagating connection
    information between a source population (dipde ExternalPopulation or
    InternalPopulation) and a target population (dipde InternalPopulation).
    Handles delay information via a delay queue that is rolled on each timestep,
    and reuses connection information by using a ConnectionDistribution object
    with a specific signature to avoid duplication among identical connections.

    Parameters
    ----------
    source : InternalPopulation or ExternalPopulation
        Source population for connection.
    target : InternalPopulation
        Target population for connection.
    nsyn : int
        In-degree of connectivity from source to target.
    weights : list
        Weights defining synaptic distribution (np.ndarray).
    probs : list (same length as weights, and sums to 1)
        Probabilities corresponding to weights.
    delay: float (default=0)
        Transmission delay (units: sec).
    metadata: Connection metadata, all other kwargs
    '''

    def __init__(self,
                 source,
                 target,
                 nsyn,
                 **kwargs):

        self.source = source
        self.target = target
        self.nsyn = nsyn

        self.weights = kwargs.pop('weights', None)
        self.probs = kwargs.pop('probs', None)
        self.delay = float(kwargs.pop('delay', 0))
        self.metadata = kwargs

        # Identity checks instead of `!= None`: `!= None` is elementwise (and
        # ambiguous in boolean context) when weights/probs are numpy arrays.
        if self.weights is not None or self.probs is not None:
            assert len(self.weights) == len(self.probs)
        else:
            # No explicit distribution supplied; discretize the requested one.
            self.weights, self.probs = util.descretize(kwargs.get('distribution', None),
                                                       kwargs.get('N', None),
                                                       scale=kwargs.get('scale', None))

        assert np.abs(self.probs).sum() == 1

        # Defined at runtime (by initialize() / the owning Simulation):
        self.delay_queue = None
        self.delay_ind = None
        self.simulation = None

    def initialize(self):
        '''Initialize the connection at the beginning of a simulation.

        Calling this method:
        1) Initializes a delay queue used to store values of inputs in a last-in-first-out rolling queue.
        2) Creates a connection_distribution object for the connection, if a suitable object is not already registered with the simulation-level connection distribution collection.

        This method is called by the Simulation object (initialization method),
        but can also be called by a user when defining an alternative time
        stepping loop.
        '''
        self.initialize_delay_queue()
        self.initialize_connection_distribution()

    def initialize_connection_distribution(self):
        """Create connection distribution, if necessary.

        If the signature of this connection is already registered to the
        simulation-level connection distribution collection, it is associated
        with self. If not, it adds the connection distribution to the
        collection, and associates it with self.
        """
        conn_dist = ConnectionDistribution(self.target.edges, self.weights, self.probs)
        conn_dist.simulation = self.simulation
        self.simulation.connection_distribution_collection.add_connection_distribution(conn_dist)
        self.connection_distribution = self.simulation.connection_distribution_collection[conn_dist.signature]

    def initialize_delay_queue(self):
        """Initialize a delay queue for the connection.

        The delay attribute of the connection defines the transmission delay of
        the signal from the source to the target. Firing rate values from the
        source population are held in a queue, discretized by the timestep, that
        is rolled over once per timestep. If the source is an ExternalPopulation,
        the queue is initialized from its firing-rate function; if the source is
        an InternalPopulation, the queue is filled with its current firing rate.
        """
        # Number of timesteps spanned by the requested delay.
        self.delay_ind = int(np.round(self.delay/self.simulation.dt))

        # np.ones/np.zeros/np.roll instead of the np.core.numeric aliases,
        # which are deprecated and removed in NumPy 2.0.
        if self.source.type == 'internal':
            self.delay_queue = np.ones(self.delay_ind+1)*self.source.curr_firing_rate
        elif self.source.type == 'external':
            self.delay_queue = np.zeros(self.delay_ind+1)
            for i in range(len(self.delay_queue)):
                self.delay_queue[i] = self.source.firing_rate(self.simulation.t - self.simulation.dt*i)
            self.delay_queue = self.delay_queue[::-1]
        else:
            raise Exception('Unrecognized source type: "%s"' % self.source.type)    # pragma: no cover

    def update(self):
        """Update Connection, called once per timestep."""
        self.delay_queue[0] = self.source.curr_firing_rate
        self.delay_queue = np.roll(self.delay_queue, -1)

    @property
    def curr_delayed_firing_rate(self):
        """Current firing rate of the source (float).

        Property that accesses the firing rate at the top of the delay queue,
        from the source population.
        """
        try:
            return self.delay_queue[0]
        except TypeError:
            # delay_queue is still None (not initialized yet): the original
            # used a bare `except:`; only the uninitialized case is expected.
            self.initialize_delay_queue()
            return self.delay_queue[0]
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
1d281f4a7fba806ffa6a3d7a7354d1fbe7f9fd36
|
71d0a467e2c3c534456ec28f4634fb8efd13a145
|
/backend/users/migrations/0002_auto_20201018_0819.py
|
721547007279241c4f88e7bad632aad2c34a19eb
|
[] |
no_license
|
crowdbotics-apps/demo-21662
|
4167ddd99751c3f9352e0cd8752177c4442ecd3f
|
e6a1de517cc1ae79d0fd58506e604f1ec2258307
|
refs/heads/master
| 2022-12-31T07:43:46.969806
| 2020-10-18T08:19:38
| 2020-10-18T08:19:38
| 305,053,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 2.2.16 on 2020-10-18 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes User.name optional (blank/null CharField)."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
7a6d25498f7e955b9ca6d73293ef58f257f2ff44
|
1c72aa6d53c886d8fb8ae41a3e9b9c6c4dd9dc6f
|
/Semester 1/Week 6/test_module.py
|
bbc1c68bb40bb4cdfe43201388e9fc2e1ed27b9c
|
[] |
no_license
|
codebubb/python_course
|
74761ce3189d67e3aff964c056aeab27d4e94d4a
|
4a6ed4a64e6a726d886add8364c65956d5053fc2
|
refs/heads/master
| 2021-01-11T03:06:50.519208
| 2016-07-29T10:47:12
| 2016-10-17T10:42:29
| 71,114,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
import text_analysis
# Demo driver for the text_analysis module (Python 2: print statements).
# NOTE(review): `str` shadows the builtin str type for the rest of the script.
str = "The quick brown fox jumped over the lazy dog"
print "The string has a an average word length of", text_analysis.average_length(str.split())
print "The last word is ", text_analysis.last_word(str)
print "The position in the alphabet of the first letter of the last word is", text_analysis.position_in_alphabet(text_analysis.last_word(str)[0])
|
[
"jpbubb82@gmail.com"
] |
jpbubb82@gmail.com
|
c92417f537f0d13073d8166e8e78b658f05ed99b
|
a210132dd3e87772b4e1f3ef246ea8da4d8646e7
|
/cmdb1_demo1/cmdb1_demo1_app1/migrations/0030_auto_20190112_1339.py
|
5a0b6b06c4f85f641cf0c6107f747de923245c3d
|
[] |
no_license
|
hongmalong/cmdb1
|
8574b21e81fb1833ec75d96fa771073ab9c360b3
|
8f2ba9afa549e24d9b406ff5b00a76dec31dd5ac
|
refs/heads/master
| 2021-07-01T14:29:31.278517
| 2019-04-18T05:04:54
| 2019-04-18T05:04:54
| 124,845,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,148
|
py
|
# Generated by Django 2.0.3 on 2019-01-12 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the EventTable model and rewrites the `ctime`
    default on every table (plus ServiceTypeTable.portNumber) to the
    generation timestamp '20190112133920' frozen into this file."""

    dependencies = [
        ('cmdb1_demo1_app1', '0029_auto_20190112_1337'),
    ]

    operations = [
        migrations.CreateModel(
            name='EventTable',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
                ('eventId', models.CharField(max_length=200, unique=True)),
                ('status', models.CharField(default=None, max_length=200, null=True)),
            ],
        ),
        migrations.AlterField(
            model_name='cabinettable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='companytable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='deploylogtable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='enviromenttable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='equipmenttable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='equipmenttypetable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='historytable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='logpathtable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='nodetable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='occupationtable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='porttable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='privatetable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='projecttable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='providertable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='serverroomtable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='servicetable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='servicetypetable',
            name='ctime',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
        migrations.AlterField(
            model_name='servicetypetable',
            name='portNumber',
            field=models.CharField(default='20190112133920', max_length=49, null=True),
        ),
    ]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
3ed8f4ab078ac3c57086c22a5dd0db94514f07af
|
cea30aead7f0b529ee072c1bcab2896777e1408d
|
/PreprocessingCropsData/venv/Lib/site-packages/sklearn/datasets/_covtype.py
|
f705af97d2dfeb6b3a67a9d38481d1b500f0f750
|
[] |
no_license
|
pgj9702/FarmSolution
|
3730ab3ca983b335ed48a60935c5fa6e3983cbb1
|
a8cacc45b8519e79b51ab65b9539a01f5006e64f
|
refs/heads/master
| 2023-03-30T15:41:10.312044
| 2021-03-31T08:47:23
| 2021-03-31T08:47:23
| 334,019,778
| 0
| 1
| null | 2021-02-22T09:32:57
| 2021-01-29T02:52:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,646
|
py
|
"""Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
from gzip import GzipFile
import logging
from os.path import dirname, exists, join
from os import remove, makedirs
import numpy as np
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ._base import _pkl_filepath
from ..utils import check_random_state
from ..utils.validation import _deprecate_positional_args
# The original area_data can be found in:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
# NOTE(review): 'covtype.area_data.gz' (and 'area_data' above) looks like an
# accidental global find/replace of "data" -> "area_data"; upstream sklearn
# names this file 'covtype.data.gz'. The filename only affects the local
# download path, but confirm before relying on it.
ARCHIVE = RemoteFileMetadata(
    filename='covtype.area_data.gz',
    url='https://ndownloader.figshare.com/files/5976039',
    checksum=('614360d0257557dd1792834a85a1cdeb'
              'fadc3c4f30b011d56afee7ffb5b15771'))

logger = logging.getLogger(__name__)

# Column names reference:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info
FEATURE_NAMES = ["Elevation",
                 "Aspect",
                 "Slope",
                 "Horizontal_Distance_To_Hydrology",
                 "Vertical_Distance_To_Hydrology",
                 "Horizontal_Distance_To_Roadways",
                 "Hillshade_9am",
                 "Hillshade_Noon",
                 "Hillshade_3pm",
                 "Horizontal_Distance_To_Fire_Points"]
# 4 one-hot wilderness-area columns and 40 one-hot soil-type columns.
FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)]
FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)]
TARGET_NAMES = ["Cover_Type"]
@_deprecate_positional_args
def fetch_covtype(*, data_home=None, download_if_missing=True,
                  random_state=None, shuffle=False, return_X_y=False,
                  as_frame=False):
    """Load the covertype dataset (classification).

    Download it if necessary.

    =================   ============
    Classes                        7
    Samples total             581012
    Dimensionality                54
    Features                     int
    =================   ============

    Read more in the :ref:`User Guide <covtype_dataset>`.

    Parameters
    ----------
    data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    return_X_y : bool, default=False
        If True, returns ``(data.data, data.target)`` instead of a Bunch
        object.

        .. versionadded:: 0.20

    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is a pandas DataFrame or
        Series depending on the number of target columns. If `return_X_y` is
        True, then (`data`, `target`) will be pandas DataFrames or Series as
        described below.

        .. versionadded:: 0.24

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : ndarray of shape (581012, 54)
            Each row corresponds to the 54 features in the dataset.
        target : ndarray of shape (581012,)
            Each value corresponds to one of
            the 7 forest covertypes with values
            ranging between 1 to 7.
        frame : dataframe of shape (581012, 53)
            Only present when `as_frame=True`. Contains `data` and `target`.
        DESCR : str
            Description of the forest covertype dataset.
        feature_names : list
            The names of the dataset columns.
        target_names: list
            The names of the target columns.

    (data, target) : tuple if ``return_X_y`` is True

        .. versionadded:: 0.20
    """
    data_home = get_data_home(data_home=data_home)
    covtype_dir = join(data_home, "covertype")
    samples_path = _pkl_filepath(covtype_dir, "samples")
    targets_path = _pkl_filepath(covtype_dir, "targets")
    available = exists(samples_path)

    if download_if_missing and not available:
        # First call on this machine: fetch, parse and cache the dataset.
        if not exists(covtype_dir):
            makedirs(covtype_dir)
        logger.info("Downloading %s" % ARCHIVE.url)

        archive_path = _fetch_remote(ARCHIVE, dirname=covtype_dir)
        Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=',')
        # The raw archive is no longer needed once parsed.
        remove(archive_path)

        X = Xy[:, :-1]
        y = Xy[:, -1].astype(np.int32, copy=False)

        joblib.dump(X, samples_path, compress=9)
        joblib.dump(y, targets_path, compress=9)
    elif not available and not download_if_missing:
        raise IOError("Data not found and `download_if_missing` is False")
    else:
        # Cached by a previous call: load from disk. (Replaces the original
        # `try: X, y / except NameError:` hack with an explicit branch.)
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)

    if shuffle:
        ind = np.arange(X.shape[0])
        rng = check_random_state(random_state)
        rng.shuffle(ind)
        X = X[ind]
        y = y[ind]

    module_path = dirname(__file__)
    with open(join(module_path, 'descr', 'covtype.rst')) as rst_file:
        fdescr = rst_file.read()

    frame = None
    if as_frame:
        frame, X, y = _convert_data_dataframe(caller_name="fetch_covtype",
                                              data=X,
                                              target=y,
                                              feature_names=FEATURE_NAMES,
                                              target_names=TARGET_NAMES)
    if return_X_y:
        return X, y

    return Bunch(data=X,
                 target=y,
                 frame=frame,
                 target_names=TARGET_NAMES,
                 feature_names=FEATURE_NAMES,
                 DESCR=fdescr)
|
[
"cao147147@naver.com"
] |
cao147147@naver.com
|
3e57cd7b2ce4397e6d921fac46ff9af95062da80
|
1adc05008f0caa9a81cc4fc3a737fcbcebb68995
|
/hardhat/recipes/ocaml/coq.py
|
9a02ca553f2d115228f920a99f746e35bcdf6ff9
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
stangelandcl/hardhat
|
4aa995518697d19b179c64751108963fa656cfca
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
refs/heads/master
| 2021-01-11T17:19:41.988477
| 2019-03-22T22:18:44
| 2019-03-22T22:18:52
| 79,742,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
from ..base import GnuRecipe
class CoqRecipe(GnuRecipe):
    """Build recipe for the Coq proof assistant."""

    def __init__(self, *args, **kwargs):
        super(CoqRecipe, self).__init__(*args, **kwargs)
        self.name = 'coq'
        self.version = '8.6'
        self.sha256 = ('6e3c3cf5c8e2b0b760dc52738e2e849f'
                       '3a8c630869659ecc0cf41413fcee81df')
        self.depends = ['ocaml-findlib', 'ocaml-camlp5']
        self.url = ('https://coq.inria.fr/distrib/V$version/files/'
                    'coq-$version.tar.gz')
        # Coq ships its own configure script; pass the install prefix to it.
        self.configure_args = self.shell_args + [
            'configure', '-prefix', self.prefix_dir]

    def need_configure(self):
        """Coq's custom configure step must always run."""
        return True
|
[
"clayton.stangeland@gmail.com"
] |
clayton.stangeland@gmail.com
|
001444d97273fc955bdf648d43cc6b89ce9194c9
|
3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7
|
/mounth001/day24/text/file_write_2.py
|
57e6dfeb11a4a1e289915f4535f8acaa37be0ae6
|
[] |
no_license
|
Molly-l/66
|
4bfe2f93e726d3cc059222c93a2bb3460b21ad78
|
fae24a968f590060522d30f1b278fcfcdab8b36f
|
refs/heads/master
| 2020-09-28T12:50:18.590794
| 2019-11-27T04:42:28
| 2019-11-27T04:42:28
| 226,782,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
"""File-write demo: opening with 'r' fails when the file is missing, while
'w'/'a' create it; 'w' truncates and rewrites, 'a' appends."""
# Context manager ensures the file is closed even if the write raises
# (the original relied on an explicit close()).
with open('w01.txt', 'ab') as f:
    # f.write('111')  # would fail: binary mode requires bytes, not str
    payload = 'dfyt'.encode()  # encode the string to bytes
    f.write(payload)
|
[
"769358744@qq.com"
] |
769358744@qq.com
|
a269f90cf7a7c85403e4d0201d5daf68e1d31eb9
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2652/60705/266739.py
|
3f6d30e69a9b783eadfdca3eaeda2a8df1e79ad9
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
def solve(lines):
    """Pick N students with the highest grades whose combined price fits in
    the budget F, sliding the window down from the top; return the median
    grade of the chosen window, or -1 if no window is affordable.

    lines[0] is "N C F"; lines[1..C] are "grade price" pairs.
    """
    n_needed, n_candidates, funds = map(int, lines[0].split(" "))
    grades = {}
    prices = {}
    for idx in range(n_candidates):
        grade, price = map(int, lines[idx + 1].split(" "))
        grades[idx] = grade
        prices[idx] = price

    # Candidates sorted by grade, ascending; windows are taken from the top.
    ranked = sorted(grades.items(), key=lambda kv: kv[1])

    index = n_candidates - 1
    while index >= n_needed - 1:
        # BUG FIX: the original summed prices[li[index][0]] on every inner
        # iteration (same candidate N times) instead of prices[li[i][0]].
        cost = sum(prices[ranked[i][0]]
                   for i in range(index, index - n_needed, -1))
        if cost < funds:
            break
        index -= 1

    if index < n_needed - 1:
        return -1

    window = sorted(grades[ranked[i][0]]
                    for i in range(index, index - n_needed, -1))
    if n_needed % 2 == 1:
        return window[n_needed // 2]
    return int((window[n_needed // 2] + window[n_needed // 2 - 1]) / 2)


if __name__ == '__main__':
    # Debug prints from the original (N:, C:, dicts, loop indices) removed:
    # they corrupted the judged output.
    header = input()
    _, candidate_count, _ = map(int, header.split(" "))
    rows = [input() for _ in range(candidate_count)]
    print(solve([header] + rows))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
bc4300f15098e0e87232005f0927580177ee0c50
|
8d3b5db62ec817a81d97d2bf741166da20fecffc
|
/tensorforce/core/policies/action_value.py
|
4b09436aff4b0089f8fc12aa9de65b535dfcc954
|
[
"Apache-2.0"
] |
permissive
|
stheid/tensorforce
|
6ffe918054d1b298c0c4cf4de9a669d500c0983d
|
c4cce421be650d7500125b793b59aaeb92ffdf51
|
refs/heads/master
| 2022-11-23T19:15:39.336310
| 2020-06-11T16:04:08
| 2020-06-11T16:04:08
| 271,549,628
| 0
| 0
|
Apache-2.0
| 2020-06-11T16:04:09
| 2020-06-11T13:12:16
| null |
UTF-8
|
Python
| false
| false
| 5,112
|
py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import tensorflow as tf
from tensorforce import util
from tensorforce.core.policies import Policy
class ActionValue(Policy):
    """
    Base class for action-value-based policies.

    Args:
        name (string): Module name
            (<span style="color:#0000C0"><b>internal use</b></span>).
        states_spec (specification): States specification
            (<span style="color:#0000C0"><b>internal use</b></span>).
        actions_spec (specification): Actions specification
            (<span style="color:#0000C0"><b>internal use</b></span>).
        device (string): Device name
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        summary_labels ('all' | iter[string]): Labels of summaries to record
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        l2_regularization (float >= 0.0): Scalar controlling L2 regularization
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
    """

    def tf_act(self, states, internals, auxiliaries, return_internals):
        # Greedy action selection: per action, argmax over the last axis of
        # its value tensor, cast to the action's dtype.
        assert return_internals

        actions_values = self.actions_values(
            states=states, internals=internals, auxiliaries=auxiliaries
        )

        actions = OrderedDict()
        for name, spec, action_values in util.zip_items(self.actions_spec, actions_values):
            actions[name] = tf.math.argmax(
                input=action_values, axis=-1, output_type=util.tf_dtype(spec['type'])
            )

        return actions

    def tf_states_value(
        self, states, internals, auxiliaries, reduced=True, include_per_action=False
    ):
        # Flatten each action's state-value tensor to (batch, prod(shape)),
        # concatenate across actions, and optionally mean-reduce to a scalar
        # per batch element.
        states_values = self.states_values(
            states=states, internals=internals, auxiliaries=auxiliaries
        )

        for name, spec, states_value in util.zip_items(self.actions_spec, states_values):
            states_values[name] = tf.reshape(
                tensor=states_value, shape=(-1, util.product(xs=spec['shape']))
            )
        states_value = tf.concat(values=tuple(states_values.values()), axis=1)
        if reduced:
            states_value = tf.math.reduce_mean(input_tensor=states_value, axis=1)
            if include_per_action:
                for name in self.actions_spec:
                    states_values[name] = tf.math.reduce_mean(
                        input_tensor=states_values[name], axis=1
                    )

        if include_per_action:
            # '*' holds the value aggregated over all actions.
            states_values['*'] = states_value
            return states_values
        else:
            return states_value

    def tf_actions_value(
        self, states, internals, auxiliaries, actions, reduced=True, include_per_action=False
    ):
        # Same reduction scheme as tf_states_value, but for the values of the
        # given actions.
        actions_values = self.actions_values(
            states=states, internals=internals, auxiliaries=auxiliaries, actions=actions
        )

        for name, spec, actions_value in util.zip_items(self.actions_spec, actions_values):
            actions_values[name] = tf.reshape(
                tensor=actions_value, shape=(-1, util.product(xs=spec['shape']))
            )
        actions_value = tf.concat(values=tuple(actions_values.values()), axis=1)
        if reduced:
            actions_value = tf.math.reduce_mean(input_tensor=actions_value, axis=1)
            if include_per_action:
                for name in self.actions_spec:
                    actions_values[name] = tf.math.reduce_mean(
                        input_tensor=actions_values[name], axis=1
                    )

        if include_per_action:
            actions_values['*'] = actions_value
            return actions_values
        else:
            return actions_value

    def tf_states_values(self, states, internals, auxiliaries):
        # State value as the max over each action's value tensor; only defined
        # for discrete ('int') actions.
        if not all(spec['type'] == 'int' for spec in self.actions_spec.values()):
            raise NotImplementedError

        actions_values = self.actions_values(
            states=states, internals=internals, auxiliaries=auxiliaries
        )

        states_values = OrderedDict()
        for name, spec, action_values in util.zip_items(self.actions_spec, actions_values):
            states_values[name] = tf.math.reduce_max(input_tensor=action_values, axis=-1)

        return states_values

    def tf_actions_values(self, states, internals, auxiliaries, actions=None):
        # Abstract: concrete action-value policies must implement this.
        raise NotImplementedError
|
[
"alexkuhnle@t-online.de"
] |
alexkuhnle@t-online.de
|
10ca4009d3fee037182f4df9155038fb40fe9114
|
93fc75b62e3fb6524f3891daf58772175fee781c
|
/沈书颖/2017310398-沈书颖-第五次作业-金工17-1/2017310398-沈书颖-第五次作业-金工17-1/10-6 10-7.py
|
4555a8454fea0638428be4e32b60903555d42d31
|
[] |
no_license
|
jingong171/jingong-homework
|
13174a4a7b39b8ae6d5da103cbf0fb40766d59c1
|
542e8781f26676a62538714b92fb0bccdf41b47b
|
refs/heads/master
| 2020-03-29T13:38:34.152280
| 2018-12-17T14:38:08
| 2018-12-17T14:38:08
| 149,974,131
| 8
| 11
| null | 2018-10-08T14:40:58
| 2018-09-23T10:32:35
|
Python
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Repeatedly prompt the user for two numbers and print their sum.
# Non-numeric input is reported and the loop simply asks again.
while True:
    first = input("请输入一个数字:")
    second = input("请再输入一个数字:")
    try:
        total = int(first) + int(second)
    except ValueError:
        # At least one entry was not an integer literal.
        print("请输入数字!")
    else:
        # Both parsed cleanly: show the result.
        print(total)
|
[
"35986375+FrancisLau098@users.noreply.github.com"
] |
35986375+FrancisLau098@users.noreply.github.com
|
33819073c9a333a74a0dd3cd161259efaae88da2
|
44e6e50a4d2e0095e055e59a6250b8ccf327f844
|
/morepath/converter.py
|
a87ca28927e2ff16e91ad410496591cae56631d0
|
[] |
no_license
|
iapilgrim/morepath
|
14f8f4d457fa9e9a5a2342f659e03e962209f5a0
|
f173860661d00f2b3a684d2c512a1741d40cc26a
|
refs/heads/master
| 2020-12-26T03:44:35.396492
| 2014-02-13T08:31:40
| 2014-02-13T08:31:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
from reg.mapping import Map, ClassMapKey
class Converter(object):
    """Decode strings into objects and encode objects back into strings.

    Used for decoding/encoding URL parameters and path parameters.
    """

    def __init__(self, decode, encode=None):
        """Create a new converter.

        :param decode: callable turning a string into an object.
        :param encode: callable turning an object into a string; falls
          back to ``unicode`` (this module targets Python 2).
        """
        self.decode = decode
        self.encode = encode if encode else unicode

    def __eq__(self, other):
        # Converters compare by identity of their callables, not by value.
        same_decode = self.decode is other.decode
        same_encode = self.encode is other.encode
        return same_decode and same_encode

    def __ne__(self, other):
        return not self == other


# Converter that passes strings through unchanged in both directions.
IDENTITY_CONVERTER = Converter(lambda s: s, lambda s: s)
class ConverterRegistry(object):
    """Registry of converters for URL parameters and path variables.

    Used by the :meth:`morepath.AppBase.path` directive. Lookup is
    inheritance-aware: a converter registered for a base class also
    applies to its subclasses.
    """

    def __init__(self):
        self._map = Map()

    def register_converter(self, type, converter):
        """Register *converter* for *type*.

        :param type: the Python type to register the converter for.
        :param converter: a :class:`morepath.Converter` instance.
        """
        self._map[ClassMapKey(type)] = converter

    def converter_for_type(self, type):
        """Return the converter registered for *type*.

        Falls back to converters registered for base classes; returns
        ``None`` when nothing matches.
        """
        return self._map.get(ClassMapKey(type))

    def converter_for_value(self, v):
        """Return the converter appropriate for value *v*.

        ``None`` maps to :data:`IDENTITY_CONVERTER`; any other value is
        looked up by its type (inheritance-aware).
        """
        if v is not None:
            return self.converter_for_type(type(v))
        return IDENTITY_CONVERTER
|
[
"faassen@startifact.com"
] |
faassen@startifact.com
|
8b82b13b6a0ec3017b98b71e4834d9de191a7e53
|
3712a929d1124f514ea7af1ac0d4a1de03bb6773
|
/开班笔记/python数据分析机器学习部分/机器学习/day04/score.py
|
03c4d24bda3a74f02be17657cf046736a6d8e985
|
[] |
no_license
|
jiyabing/learning
|
abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9
|
6059006b0f86aee9a74cfc116d2284eb44173f41
|
refs/heads/master
| 2020-04-02T20:47:33.025331
| 2018-10-26T05:46:10
| 2018-10-26T05:46:10
| 154,779,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.cluster as sc
import sklearn.metrics as sm
import matplotlib.pyplot as mp
# Load the 2-D sample points from the comma-separated data file.
x = []
with open('../../data/perf.txt', 'r') as f:
    for line in f.readlines():
        data = [float(substr) for substr
                in line.split(',')]
        x.append(data)
x = np.array(x)
# Fit K-Means for cluster counts 2..10 and score each model by its
# silhouette coefficient (higher = better separated clusters).
clstrs, scores, models = np.arange(2, 11), [], []
for n_clusters in clstrs:
    model = sc.KMeans(init='k-means++',
                      n_clusters=n_clusters)
    model.fit(x)
    score = sm.silhouette_score(
        x, model.labels_, sample_size=len(x),
        metric='euclidean')
    scores.append(score)
    models.append(model)
scores = np.array(scores)
# Select the cluster count with the best silhouette score.
best_index = scores.argmax()
best_clstr = clstrs[best_index]
print(best_clstr)
best_score = scores[best_index]
print(best_score)
best_model = models[best_index]
centers = best_model.cluster_centers_
# Build a dense grid over the data's bounding box (padded by 1) and
# predict each grid cell's cluster to paint the decision regions.
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(np.arange(l, r, h),
                     np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = best_model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
pred_y = best_model.predict(x)
# Plot decision regions, the points colored by cluster, and the centers.
mp.figure('K-Means Cluster', facecolor='lightgray')
mp.title('K-Means Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y, cmap='brg', s=60)
mp.scatter(centers[:, 0], centers[:, 1], marker='+',
           c='gold', s=1000, linewidth=1)
mp.show()
|
[
"yabing_ji@163.com"
] |
yabing_ji@163.com
|
cfc990e4788553a5ae0cb0ae98b5d5f6046e27fa
|
0670d89e5d7b91d86b181e0b6cfdbef8b3b9e9e6
|
/p2/api/migrations/0001_initial.py
|
59146617b32f664e13e38f1dec9edb666ee6eefe
|
[
"MIT"
] |
permissive
|
BeryJu/p2
|
dfe570afb420843033e519350f5b89e992878a6b
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
refs/heads/master
| 2020-12-04T01:21:21.197822
| 2019-08-29T16:02:21
| 2019-08-29T16:02:21
| 231,549,415
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,347
|
py
|
# Generated by Django 2.2 on 2019-05-03 17:56
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import p2.api.models
class Migration(migrations.Migration):
    """Initial migration: create the APIKey model.

    An APIKey holds a generated access/secret key pair, belongs to a
    user, and may optionally be scoped to a volume.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('p2_core', '0017_auto_20190503_1755'),
    ]

    operations = [
        migrations.CreateModel(
            name='APIKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                # Keys are generated server-side via the model-module helpers.
                ('access_key', models.CharField(default=p2.api.models.get_access_key, max_length=20)),
                ('secret_key', models.CharField(default=p2.api.models.get_secret_key, max_length=40)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                # Deleting the volume keeps the key but nulls the scope.
                ('volume', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='p2_core.Volume')),
            ],
            options={
                'verbose_name': 'API Key',
                'verbose_name_plural': 'API Keys',
                'unique_together': {('access_key', 'secret_key')},
            },
        ),
    ]
|
[
"jens.langhammer@beryju.org"
] |
jens.langhammer@beryju.org
|
87ed7b1213ff1c5ac14a3b8964f9b62568a734e3
|
350ade9361645f87d96589a0c90c76d8a951832b
|
/CP4/ICCP4_9.py
|
d2b59b71c9a9c7a4c57f25ad84303e7d47c670d7
|
[] |
no_license
|
dongzeyuan/Practise
|
becf7c7ca15928213aa22ae15bd8b3f1f9b7dc8b
|
ecef4466d30c5c9e88e766b4f3df6db24959b9d3
|
refs/heads/master
| 2021-09-21T02:06:24.629708
| 2018-08-19T08:50:02
| 2018-08-19T08:50:02
| 119,028,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
# coding=UTF-8
# Draw a 4-row rectangle of stars, then a 4-row left-aligned star
# triangle. print('\n') intentionally leaves a blank line after each
# row (the newline character plus print's own trailing newline).
for _ in range(1, 5):
    print('*' * 7)
    print('\n')
for row in range(1, 5):
    print('*' * (2 * row - 1))
    print('\n')
|
[
"dongfujing88@gmail.com"
] |
dongfujing88@gmail.com
|
4b54bbdeef19d909ddd5bef71dfe0625ab46195f
|
9f930df50f28e6cbc74089057fb4418460a7f657
|
/regsoft/migrations/0001_initial.py
|
494f8a33ccdd12c1f25b1bc6e07ef0d7edadaeb4
|
[
"MIT"
] |
permissive
|
xxyyzz/apogee-2016
|
2c57b3c48334e798cab560d6525567da9b2ede61
|
c55f6427bbe246060aacbeb831e1519fb051a1b1
|
refs/heads/master
| 2021-05-16T11:55:21.525340
| 2017-09-07T19:05:00
| 2017-09-07T19:05:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: create Bhavan and Room.

    A Room belongs to a Bhavan and tracks its remaining vacancy count.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Bhavan',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('room', models.CharField(max_length=50)),
                ('vacancy', models.IntegerField()),
                # Default on_delete (CASCADE in this Django version) applies.
                ('bhavan', models.ForeignKey(to='regsoft.Bhavan')),
            ],
        ),
    ]
|
[
"satwik55@gmail.com"
] |
satwik55@gmail.com
|
e3e9ffd130063e0f3807af450237e3f23ff8ab2d
|
608ca6de9e2de70312dd3abc6fdf7d9aef326b53
|
/feder/institutions/migrations/0010_auto_20170808_0252.py
|
7c2c74204332edd5346dca93c2a0c7bf28d4b60d
|
[
"MIT"
] |
permissive
|
watchdogpolska/feder
|
4d3f3a5de5d2d5d266bf34dea4969f583f9f1aa0
|
57cfde4aa8680c08758ee531d69a40b0f7f1d9d7
|
refs/heads/master
| 2023-08-18T04:52:45.284855
| 2023-08-16T10:54:03
| 2023-08-16T10:54:03
| 40,154,083
| 18
| 12
|
MIT
| 2023-08-22T11:35:15
| 2015-08-04T00:10:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
# Generated by Django 1.11.2 on 2017-08-08 02:52
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
    """Add extra (free-form JSON), parents (self-referential M2M) and
    regon fields to Institution."""

    dependencies = [("institutions", "0009_auto_20170708_2222")]

    operations = [
        migrations.AddField(
            model_name="institution",
            name="extra",
            field=jsonfield.fields.JSONField(
                default={}, verbose_name=b"Unorganized additional information"
            ),
            # The {} default only back-fills existing rows during migration.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="institution",
            name="parents",
            field=models.ManyToManyField(
                blank=True,
                # NOTE(review): null=True has no effect on ManyToManyField
                # (Django ignores it) — harmless, but confirm intent.
                null=True,
                related_name="_institution_parents_+",
                to="institutions.Institution",
                verbose_name="Parent institutions",
            ),
        ),
        migrations.AddField(
            model_name="institution",
            name="regon",
            field=models.CharField(
                blank=True, max_length=14, verbose_name="REGON number"
            ),
        ),
    ]
|
[
"naczelnik@jawnosc.tk"
] |
naczelnik@jawnosc.tk
|
3bd9d220a3bff416185608e78413c0e27bdbfaf2
|
a382716034b91d86ac7c8a548a63d236d6da8032
|
/iaso/dhis2/api_logger.py
|
7347f082e7d6aac87c5f6449751f5d6ad37657dc
|
[
"MIT"
] |
permissive
|
lpontis/iaso
|
336221335fe33ca9e07e40feb676f57bbdc749ca
|
4d3a9d3faa6b3ed3a2e08c728cc4f03e5a0bbcb6
|
refs/heads/main
| 2023-08-12T20:34:10.823260
| 2021-10-04T07:34:50
| 2021-10-04T07:34:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,627
|
py
|
from iaso.models.base import ExportLog
from dhis2 import RequestException
import json
class ApiLogger:
    """Wrapper around a dhis2 API client that records every request and
    its response (or failure) as an :class:`ExportLog` entry."""

    def __init__(self, api):
        self.api = api
        self.export_logs = []

    def get(self, url, params=None):
        """GET *url*; log the exchange. Re-raises RequestException."""
        full_url = self.api.base_url + "/" + url
        try:
            response = self.api.get(url, params=params)
            self._log_success(full_url, params, response)
        except RequestException as dhis2_exception:
            # log_exception records the failure and re-raises.
            self.log_exception(dhis2_exception, full_url, params)
        return response

    def post(self, url, payload):
        """POST *payload* to *url*; log the exchange. Re-raises RequestException."""
        full_url = self.api.base_url + "/" + url
        try:
            response = self.api.post(url, payload)
            self._log_success(full_url, payload, response)
        except RequestException as dhis2_exception:
            self.log_exception(dhis2_exception, full_url, payload)
        return response

    def put(self, url, payload):
        """PUT *payload* to *url*; log the exchange. Re-raises RequestException."""
        full_url = self.api.base_url + "/" + url
        response = self.api.put(url, payload)
        try:
            self._log_success(full_url, payload, response)
        except RequestException as dhis2_exception:
            self.log_exception(dhis2_exception, full_url, payload)
        return response

    def _log_success(self, full_url, sent, response):
        # Record a successful (HTTP 200) exchange.
        export_log = ExportLog()
        export_log.sent = sent
        export_log.received = response.json()
        export_log.url = full_url
        export_log.http_status = 200
        self.export_logs.append(export_log)

    def pop_export_logs(self):
        """Return the accumulated logs and reset the buffer."""
        result = self.export_logs
        self.export_logs = []
        return result

    def log_exception(self, dhis2_exception, full_url, params):
        """Record a failed request as an ExportLog, then re-raise."""
        try:
            resp = json.loads(dhis2_exception.description)
        except (TypeError, ValueError):
            # Server returned a non-JSON body; keep the raw text instead.
            resp = {
                "status": "ERROR",
                "description": "non json response return by server",
                "raw_data": dhis2_exception.description,
            }
        # BUG FIX: the original re-ran json.loads(description) unconditionally
        # after the except block, which raised again on non-JSON bodies and
        # masked the fallback payload (and the original exception).
        export_log = ExportLog()
        export_log.url = full_url
        export_log.sent = params
        export_log.received = resp
        # NOTE(review): success paths set `http_status` while this sets
        # `http_code` — confirm which field ExportLog actually defines.
        export_log.http_code = dhis2_exception.code
        self.export_logs.append(export_log)
        raise dhis2_exception
|
[
"tech@bluesquarehub.com"
] |
tech@bluesquarehub.com
|
ec98c3f0072afc3854407df67e70813898de5049
|
baad457b1859f218314631c2bf10e28ab34aa0a5
|
/chapter02.py
|
d3f57722b9729416a00f466431e597015e6902aa
|
[] |
no_license
|
hankehly/data-structures-and-algorithms-in-python
|
d856ae6049639d3f713556e7aee91cb51443f99b
|
26d2ad4852c103e5c96138de6957fa092a6b81aa
|
refs/heads/master
| 2023-02-19T13:38:52.986727
| 2021-01-25T00:51:19
| 2021-01-25T00:51:19
| 325,148,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,526
|
py
|
def R_2_01():
    """R-2.1: three examples of life-critical software applications.

    1. Remote-surgery software that moves robotic arms.
    2. Automated rocket ignition and trajectory adjustment in space flight.
    3. Pacemakers.
    """
    pass
def R_2_02():
    """R-2.2: an application where adaptability decides between continued
    sales and bankruptcy.

    A data-analysis app must scale to different input sizes to stay
    marketable. If the system assumes a maximum of N data points, sales
    cannot target certain clients, or existing customers defect to
    competitors with more scalable systems.
    """
    pass
def R_2_03():
    """R-2.3: behaviors a text-editor GUI might encapsulate.

    Processing key strokes, saving a text file, printing a text file,
    or passing a file to the Python interpreter to run as a program.
    """
    pass
def R_2_04():
    """R-2.4: a Flower class with name/n_petals/price properties; the
    price setter rejects negative values."""

    class Flower:
        def __init__(self, name: str = "iris", n_petals: int = 5, price: float = 0.99):
            self._name = name
            self._n_petals = n_petals
            self._price = price

        @property
        def name(self):
            return self._name

        @name.setter
        def name(self, value):
            self._name = value

        @property
        def n_petals(self):
            return self._n_petals

        @n_petals.setter
        def n_petals(self, value):
            self._n_petals = value

        @property
        def price(self):
            return self._price

        @price.setter
        def price(self, value):
            # Only the price is validated; name/n_petals accept anything.
            if value < 0:
                raise ValueError("price must be positive")
            self._price = value

    flower = Flower()
    assert flower.name == "iris"
    flower.name = "foo"
    assert flower.name == "foo"
    # Setting a negative price must raise ValueError.
    try:
        flower.price = -1
    except ValueError:
        raised = True
    else:
        raised = False
    assert raised
def R_2_05():
    """R-2.5: CreditCard.charge validates that price is numeric."""

    class CreditCard:
        def charge(self, price):
            # Reject non-numeric prices before any balance arithmetic.
            if not isinstance(price, (int, float)):
                raise TypeError("price must be numeric")
def R_2_06():
    """R-2.6: CreditCard.make_payment validates type and sign of amount."""

    class CreditCard:
        def make_payment(self, amount):
            if not isinstance(amount, (int, float)):
                raise TypeError("amount must be numeric")
            elif amount < 0:
                raise ValueError("amount cannot be negative")
def R_2_07():
    """R-2.7: CreditCard constructor takes an explicit starting balance
    (default 0) instead of hard-coding it."""

    class CreditCard:
        def __init__(self, customer, bank, acnt, limit, balance=0):
            self._customer = customer
            self._bank = bank
            self._account = acnt
            self._limit = limit
            self._balance = balance
def R_2_08():
    """R-2.8: find which of three cards first exceeds its limit when
    charged i*j for i = 1..99 (j = 1..3 per card)."""

    class CreditCard:
        def __init__(self, customer, bank, acnt, limit, balance=0):
            self._customer = customer
            self._bank = bank
            self._account = acnt
            self._limit = limit
            self._balance = balance

        @property
        def bank(self):
            return self._bank

        @property
        def balance(self):
            return self._balance

        def charge(self, price):
            # Accept the charge only while it stays within the limit.
            if self._balance + price <= self._limit:
                self._balance += price
                return True
            return False

    def run():
        wallet = [
            CreditCard("John Archer", "KS Savings", "1234 5678 9101 1121", 2500),
            CreditCard("John Archer", "KS Federal", "4321 5678 9101 1121", 3500),
            CreditCard("John Archer", "KS Finance", "2413 5678 9101 1121", 5000),
        ]
        for i in range(1, 100):
            for j, card in enumerate(wallet, start=1):
                price = i * j
                if not card.charge(price):
                    # First rejected charge identifies the exhausted card.
                    print(f"{card.bank} (i={i}, balance={card.balance}, price={price})")
                    return

    # run()
def R_2_09():
    """R-2.9: implement Vector.__sub__ and verify component-wise
    subtraction of two 3-dimensional vectors."""

    class Vector:
        def __init__(self, d):
            self._coords = [0] * d

        def __len__(self):
            return len(self._coords)

        def __getitem__(self, item):
            return self._coords[item]

        def __setitem__(self, key, value):
            self._coords[key] = value

        def __sub__(self, other):
            # Dimensions must match for component-wise subtraction.
            if len(other) != len(self):
                raise ValueError("dimensions must agree")
            result = Vector(len(other))
            for idx, (a, b) in enumerate(zip(self, other)):
                result[idx] = a - b
            return result

    u = Vector(3)
    u[0], u[1], u[2] = 3, 3, 3
    v = Vector(3)
    v[0], v[1], v[2] = 4, 1, 3
    diff = u - v
    assert diff[0] == -1
    assert diff[1] == 2
    assert diff[2] == 0
def R_2_10():
    """R-2.10: implement Vector.__neg__ and verify component-wise
    negation of a 3-dimensional vector."""

    class Vector:
        def __init__(self, d):
            self._coords = [0] * d

        def __len__(self):
            return len(self._coords)

        def __getitem__(self, item):
            return self._coords[item]

        def __setitem__(self, key, value):
            self._coords[key] = value

        def __neg__(self):
            # Build a fresh vector with every component negated.
            result = Vector(len(self))
            for idx, value in enumerate(self):
                result[idx] = -value
            return result

    v = Vector(3)
    v[0], v[1], v[2] = 1, 0, -1
    negated = -v
    assert negated[0] == -1
    assert negated[1] == 0
    assert negated[2] == 1
if __name__ == "__main__":
    # Run every exercise in order; each raises AssertionError on failure.
    for exercise in (R_2_01, R_2_02, R_2_03, R_2_04, R_2_05,
                     R_2_06, R_2_07, R_2_08, R_2_09, R_2_10):
        exercise()
|
[
"henry.ehly@gmail.com"
] |
henry.ehly@gmail.com
|
5d45c573744b34fc2473e7e50c09225740106a27
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/77e550192cdbc1e734f5ebfcd347539d9300559b-<main>-fix.py
|
b577d86ced2688cfc84b23b2c720d617725534ed
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,507
|
py
|
def main():
    """Ansible composer module entry point.

    Builds the composer command line from the module parameters (keeping
    only options the installed composer actually supports), runs it, and
    reports changed/failed status via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(default='install', type='str', required=False),
            arguments=dict(default='', type='str', required=False),
            working_dir=dict(type='path', aliases=['working-dir']),
            global_command=dict(default=False, type='bool', aliases=['global-command']),
            prefer_source=dict(default=False, type='bool', aliases=['prefer-source']),
            prefer_dist=dict(default=False, type='bool', aliases=['prefer-dist']),
            no_dev=dict(default=True, type='bool', aliases=['no-dev']),
            no_scripts=dict(default=False, type='bool', aliases=['no-scripts']),
            no_plugins=dict(default=False, type='bool', aliases=['no-plugins']),
            optimize_autoloader=dict(default=True, type='bool', aliases=['optimize-autoloader']),
            ignore_platform_reqs=dict(default=False, type='bool', aliases=['ignore-platform-reqs']),
        ),
        required_if=[('global_command', False, ['working_dir'])],
        supports_check_mode=True,
    )
    command = module.params['command']
    # The command must be a single word; extra tokens belong in 'arguments'.
    if re.search('\\s', command):
        module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
    arguments = module.params['arguments']
    global_command = module.params['global_command']
    available_options = get_available_options(module=module, command=command)
    options = []
    # Always-on options, added only when this composer command supports them.
    default_options = ['no-ansi', 'no-interaction', 'no-progress']
    for option in default_options:
        if option in available_options:
            options.append('--%s' % option)
    if not global_command:
        options.extend(['--working-dir', ("'%s'" % module.params['working_dir'])])
    # Map module boolean parameters to their composer CLI flag names.
    option_params = {
        'prefer_source': 'prefer-source',
        'prefer_dist': 'prefer-dist',
        'no_dev': 'no-dev',
        'no_scripts': 'no-scripts',
        # BUG FIX: was 'no_plugins' (underscore), which never matched
        # composer's '--no-plugins' flag in available_options, so the
        # no_plugins parameter was silently ignored.
        'no_plugins': 'no-plugins',
        'optimize_autoloader': 'optimize-autoloader',
        'ignore_platform_reqs': 'ignore-platform-reqs',
    }
    for param, option in option_params.items():
        if module.params.get(param) and option in available_options:
            options.append('--%s' % option)
    if module.check_mode:
        # Keep check mode side-effect free.
        options.append('--dry-run')
    (rc, out, err) = composer_command(module, command, arguments, options, global_command)
    if rc != 0:
        output = parse_out(err)
        module.fail_json(msg=output, stdout=err)
    else:
        output = parse_out(out + err)
        module.exit_json(changed=has_changed(output), msg=output, stdout=(out + err))
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
f8c0c13aa9f2fb23aa4164a1148faad3a90c1454
|
0bf93a74ce5676e978f3ee79a98a1be90b0e20a5
|
/nagios/check_metar_station.py
|
7ccf2c55c95b5dbf22239356bc0d162e432bc650
|
[
"MIT"
] |
permissive
|
claudemp/iem
|
3c926361c55fde3265157e15bc5119d64dbf2418
|
557deb8c46342aa9a18ac56cba59345c072cf225
|
refs/heads/master
| 2021-04-06T13:39:08.352676
| 2018-03-12T20:58:14
| 2018-03-12T20:58:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
"""Ensure that a station is getting ingested properly
python check_metar_station.py <network> <id> <minute_of_synop>
"""
from __future__ import print_function
import sys
import datetime
from pyiem.util import get_dbconn
def check(network, station, minute):
    """Look for the station's observation in both realtime and archive.

    :param network: IEM network identifier
    :param station: station identifier
    :param minute: minute of the hour the observation is expected at
    :returns: dict with rt/arch valid timestamps and temps ('M' = missing)
    """
    # Do IEMaccess: realtime ob expected ~75 minutes ago at the given minute.
    res = {'rt_temp': 'M', 'arch_temp': 'M'}
    now = datetime.datetime.now() - datetime.timedelta(minutes=75)
    res['rt_valid'] = now.replace(minute=minute, second=0, microsecond=0)
    pgconn = get_dbconn('iem', user='nobody')
    icursor = pgconn.cursor()
    icursor.execute("""
    SELECT tmpf from current_log c JOIN stations s on
    (s.iemid = c.iemid)
    WHERE id = %s and network = %s and valid = %s
    """, (station, network, res['rt_valid']))
    if icursor.rowcount > 0:
        row = icursor.fetchone()
        res['rt_temp'] = int(row[0])
    # Do ASOS archive: lags realtime, so look ~135 minutes back.
    now = datetime.datetime.now() - datetime.timedelta(minutes=135)
    res['arch_valid'] = now.replace(minute=minute, second=0, microsecond=0)
    pgconn = get_dbconn('asos', user='nobody')
    icursor = pgconn.cursor()
    icursor.execute("""
    SELECT tmpf from alldata where station = %s and valid = %s
    """, (station, res['arch_valid']))
    if icursor.rowcount > 0:
        row = icursor.fetchone()
        res['arch_temp'] = int(row[0])
    return res
def main(argv):
    """Run the check from CLI args: <network> <station> <minute>."""
    network = argv[1]
    station = argv[2]
    minute = int(argv[3])
    res = check(network, station, minute)
    # OK only when both the realtime and archive temps were found.
    msg = ('OK' if (res['rt_temp'] != 'M' and res['arch_temp'] != 'M')
           else 'CRITICAL')
    print(('%s - RT:%s(%s) ARCH:%s(%s) |rttemp=%s;;; archtemp=%s;;;'
           ) % (msg, res['rt_valid'].strftime("%d%H%M"), res['rt_temp'],
                res['arch_valid'].strftime("%d%H%M"), res['arch_temp'],
                res['rt_temp'], res['arch_temp']))
    # Nagios exit codes: 0 = OK, 2 = CRITICAL.
    if msg == 'OK':
        sys.exit(0)
    sys.exit(2)


if __name__ == '__main__':
    main(sys.argv)
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
77e3caad1c173557813c2f8d8ea67470f4576f4e
|
2553e81ac1927f9e3e4a18da60d6e9b9cc23affc
|
/Script2.py
|
cf98933e6342ec6fd17b8bf958969ac44e45f7e8
|
[] |
no_license
|
Sapphirine/ISP-Dataset
|
50bbc545c7c62cc39cefc25ed9c53f356e7efd53
|
c93190d182b56a3c3c574105d4aff0faefc1f065
|
refs/heads/master
| 2021-08-31T16:41:33.235009
| 2017-12-22T03:47:21
| 2017-12-22T03:47:21
| 115,070,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
# NOTE(review): '%matplotlib inline' is an IPython magic, not Python —
# this file only runs inside a notebook/IPython shell; as a plain .py
# script this line is a SyntaxError.
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
plt.style.use('ggplot')
# World Bank indicator codes of interest (telecom investment, cell
# subscriptions, internet/broadband usage).
chosen_indicators = ['IE.PPI.TELE.CD', 'IT.CEL.SETS', 'SIT.CEL.SETS.p2', 'IT.NET.USER.ZS', \
                    'IT.NET.BBND', 'IT.NET.BBND.p2' ]
# NOTE(review): `df` is never defined in this file — presumably loaded
# in an earlier notebook cell; verify before running.
df_s = df[df['IndicatorCode'].isin(chosen_indicators)]
# NOTE(review): `df_subset` is also undefined — probably meant `df_s`.
df_om = df_subset[df['CountryName']=="Oman"]
def plot_indicator(indicator,delta=10):
    # Select name/year/value rows for one indicator code.
    ds_f = df_s[['IndicatorName','Year','Value']][df_s['IndicatorCode']==indicator]
    try:
        title = df_s['IndicatorName'].iloc[0]
    except:
        title = "None"
    x1 = df_s['Year'].values
    # NOTE(review): function appears truncated — it creates the figure
    # but never plots or shows anything.
    plt.figure(figsize=(20,5))
|
[
"noreply@github.com"
] |
Sapphirine.noreply@github.com
|
83e68fc46fb7aa2a009ca2fa890446accaf1b950
|
55a6a35f7720089f8c7dcc083e3600cfbca0e6a2
|
/setup.py
|
3f401546aefe0ab2c9692fd02547d4ec9866e8f6
|
[
"MIT"
] |
permissive
|
SirMathhman/context_menu
|
8d878aa94c23f225b37e6f89e00cd31cec62d65f
|
bb619d06e15798de8ceb0ddd252d7ae26a492947
|
refs/heads/master
| 2022-09-04T01:03:28.354901
| 2020-05-31T17:11:01
| 2020-05-31T17:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
import pathlib

import setuptools

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file, reused as the PyPI long description
README = (HERE / "README.md").read_text()

# This call to setup() does all the work
setuptools.setup(
    name="context_menu",
    version="1.0.0",
    description="Library to create cross-platform native context menus.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/saleguas/context_menu",
    author="Salvador Aleguas",
    author_email="salvador@aleguas.dev",
    license="MIT",
    # Trove classifiers shown on PyPI; supported Pythons 3.6-3.8
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    packages=setuptools.find_packages(),
)
|
[
"salvadoraleguas@gmail.com"
] |
salvadoraleguas@gmail.com
|
37ee79c2c93d9f8ed6baae1d8a818870133f718d
|
596e92d0d484b6e7eee6d322e72e52748fdeaa5d
|
/test/test_nhl_stats_standing.py
|
33a21fdb377d2716a2c8b6e6677a31bbaea3972a
|
[] |
no_license
|
scottypate/sportsdata
|
f5f61ddc7eb482883f93737c6ce73dd814ed4336
|
a07955ab50bf4fff1ce114ed9895095ff770c473
|
refs/heads/main
| 2023-08-18T16:51:56.452678
| 2021-10-22T12:44:08
| 2021-10-22T12:44:08
| 420,062,350
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
# coding: utf-8
"""
NHL v3 Stats
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sportsdata.nhl_stats
from sportsdata.nhl_stats.models.nhl_stats_standing import NhlStatsStanding # noqa: E501
from sportsdata.nhl_stats.rest import ApiException
class TestNhlStatsStanding(unittest.TestCase):
    """NhlStatsStanding unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed for the stub.
        pass

    def tearDown(self):
        pass

    def testNhlStatsStanding(self):
        """Test NhlStatsStanding"""
        # FIXME: construct object with mandatory attributes with example values
        # model = sportsdata.nhl_stats.models.nhl_stats_standing.NhlStatsStanding()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"scotty.pate@auth0.com"
] |
scotty.pate@auth0.com
|
6ea1daf688c5dea9387fb2ba10c5fbdfb8ce9008
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC2102.py
|
96caeab00be7aac3bbb16672f84d220d9143ee46
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,224
|
py
|
# qubit number=4
# total number=33
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings character-wise and return the
    result reversed (least-significant character first)."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, as a one-character string."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f over n control qubits and one target.

    For every n-bit pattern where f(pattern) == "1", an n-controlled
    Toffoli flips the target; X gates before/after select the pattern.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Wrap the MCT in X gates so it fires exactly on pattern `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the generated benchmark circuit on n qubits.

    The numbered comments are gate sequence IDs from the generator; the
    exact gate order is the benchmark and must not be altered.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=20
    prog.cz(input_qubit[0],input_qubit[3]) # number=21
    prog.h(input_qubit[3]) # number=22
    prog.x(input_qubit[3]) # number=13
    prog.h(input_qubit[3]) # number=23
    prog.cz(input_qubit[0],input_qubit[3]) # number=24
    prog.h(input_qubit[3]) # number=25
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.h(input_qubit[0])  # number=5
    prog.y(input_qubit[2]) # number=18
    prog.z(input_qubit[3]) # number=28
    # Oracle over the first n-1 qubits with the last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=8
    prog.h(input_qubit[0])  # number=9
    prog.cx(input_qubit[2],input_qubit[0]) # number=10
    prog.h(input_qubit[1]) # number=19
    prog.h(input_qubit[0]) # number=15
    prog.cz(input_qubit[2],input_qubit[0]) # number=16
    prog.h(input_qubit[0]) # number=17
    prog.y(input_qubit[1]) # number=26
    prog.y(input_qubit[1]) # number=27
    prog.swap(input_qubit[1],input_qubit[0]) # number=29
    prog.swap(input_qubit[1],input_qubit[0]) # number=30
    prog.x(input_qubit[1]) # number=31
    prog.x(input_qubit[1]) # number=32
    # circuit end
    # Measure every qubit into its classical register bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # f(rep) = (a . rep) XOR b, the usual Bernstein-Vazirani-style oracle.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the least-busy real IBMQ backend with >= 2 qubits.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))

    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock backend just to record the compiled circuit.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    writefile = open("../data/startQiskit_QC2102.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
4fa76e84426de96dcf88970cf3e131fedae3b972
|
a0d8e69fb80c5bcf7bdfd2c599f83fe71b04f48f
|
/lab4/mysite/settings.py
|
28d0892856efc88363d768d3008dc78678b2ecaa
|
[] |
no_license
|
DawidPawlowski123/aplikacje-internetowe-Dawid-Pawlowski-185ic
|
abb0d177ee5919294ee76af651c1c615053b1347
|
5e40ed6f0ff7615405d7ec38a649a2bd7baaff97
|
refs/heads/main
| 2023-02-25T08:39:28.672531
| 2021-01-29T14:43:00
| 2021-01-29T14:43:00
| 312,213,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,824
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '69wd+sx_)dzst15rojvt-f$16r=3x0x8ql=lv8%(mtbv*!_c9l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'post',
'rest_framework',
'rest_framework.authtoken',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_auth',
'rest_auth.registration',
# Swagger
'drf_yasg',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
#'rest_framework.permissions.AllowAny',
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1
|
[
"dawidpawlowski98@wp.pl"
] |
dawidpawlowski98@wp.pl
|
d59bbb75712262a49cc6527e34bb5d872fe48b59
|
712085c99797a14c2c5cff0e46a3de33b307d5dd
|
/eveuniverse/management/commands/eveuniverse_purge_data.py
|
b312e93dcd044b42f0c88a8e0a6ce4b1ff6c30df
|
[
"MIT"
] |
permissive
|
staropera/django-eveuniverse
|
eeb86a8086fb83e853d0ae5562a97d6969a48039
|
4dd875238d475c5dd0355283b6257e3bcbad2d8b
|
refs/heads/master
| 2023-07-05T05:53:11.295782
| 2021-07-03T13:21:31
| 2021-07-03T13:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from ... import __title__
from ...models import EveUniverseBaseModel
from ...utils import LoggerAddTag
from . import get_input
logger = LoggerAddTag(logging.getLogger(__name__), __title__)
class Command(BaseCommand):
    """Management command that wipes all eveuniverse data from the database."""

    help = (
        "Removes all app-related data from the database. "
        "Run this command before zero migrations, "
        "which would otherwise fail due to FK constraints."
    )

    def _purge_all_data(self):
        """Delete every row of every eveuniverse model, with progress output.

        Runs inside a single transaction so the purge is all-or-nothing.
        """
        with transaction.atomic():
            for model in EveUniverseBaseModel.all_models():
                count = model.objects.count()
                self.stdout.write(f"Deleting {count:,} objects from {model.__name__}")
                model.objects.all().delete()

    def handle(self, *args, **options):
        """Entry point: warn the operator, ask for confirmation, then purge."""
        self.stdout.write(
            "This command will delete all app related data in the database. "
            "This can not be undone. Note that this can disrupt other apps "
            "that relate to this data. Use with caution."
        )
        answer = get_input("Are you sure you want to proceed? (y/N)?")
        # Guard clause: anything other than 'y' (case-insensitive) aborts.
        if answer.lower() != "y":
            self.stdout.write(self.style.WARNING("Aborted"))
            return
        self.stdout.write("Starting data purge. Please stand by.")
        self._purge_all_data()
        self.stdout.write(self.style.SUCCESS("Purge complete!"))
|
[
"erik.kalkoken@gmail.com"
] |
erik.kalkoken@gmail.com
|
41a5fb2910a6e40b7bd1be04cfbbdb0ef8e86f82
|
50f6ddd45ae2811ae4580ceb4a5617106145769f
|
/player461.py
|
5fbfcb44c5bea39949159c1ff65d614cf4d54fcb
|
[] |
no_license
|
MohammedFerozHussain/guvi1
|
432f89701b8d0d6e75229dbd199e362a42e2df53
|
cd4ae6c903bd5de883234d076aec770e2c8bcd41
|
refs/heads/master
| 2020-04-15T05:10:19.398719
| 2019-08-17T18:16:39
| 2019-08-17T18:16:39
| 164,411,569
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
import math

# Read an angle in degrees from stdin, compute its sine rounded to one
# decimal place, and print it: as the one-decimal float while |sin| < 1,
# otherwise as the nearest integer (so 1.0 / -1.0 print as 1 / -1).
angle_degrees = int(input())
sine_value = round(math.sin(math.radians(angle_degrees)), 1)
if abs(sine_value) < 1:
    print(sine_value)
else:
    print(round(sine_value))
|
[
"noreply@github.com"
] |
MohammedFerozHussain.noreply@github.com
|
4108e3aeb18c96e6d9ded60bf08972664cb1c6bc
|
941cbcc815da9927c16291fd0cf341fdf26d4b4b
|
/Testing/Selenium/200518_First_demo.py
|
c33183c4fbe3ba07578a79c3b87637358574efb9
|
[] |
no_license
|
narru888/PythonWork-py37-
|
27de004157efdf42972f66b20872e17de8bc676c
|
f9cb1670fb84b9eb8aaaf7cd5cf9139ab4ef4053
|
refs/heads/master
| 2022-12-09T04:58:06.068302
| 2020-09-23T09:47:40
| 2020-09-23T09:47:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,787
|
py
|
from selenium import webdriver

# Start the browser driver and the browser; returns a WebDriver object.
# The argument is the path to the chromedriver executable.
wd = webdriver.Chrome(r'd:\Programming\WorkPlace\PythonWork(py37)\Testing\Selenium\chromedriver\chromedriver.exe')

# Navigate to the page.
wd.get(url='https://www.google.com.tw/')

# The web server may not have returned the page yet when the next line runs,
# which would make element lookups fail. implicitly_wait(5) means: retry the
# search every 0.5s for up to 5 seconds, continuing as soon as it succeeds.
wd.implicitly_wait(5)
"""1. 現在在Google首頁"""
"""
整體:
wd.implicitly_wait(5): 隱式等待
wd.get(url='https://www.google.com.tw/'): 打開網站
wd.title: 當前窗口的標記欄(google分頁上顯示的那個標題)
wd.quit(): 關閉瀏覽器驅動和瀏覽器
wd.current_window_handle: 當前窗口的handle
wd.switch_to.window(handle): 切換窗口
wd.switch_to.frame('frame的id或WebElement對象'): 切換frame
wd.switch_to.default_content(): 切換回到主Html
選擇元素:
find_element找不到對象返回異常;find_elements找不到對象返回[]。
通過class找第一個對象: find_element_by_class_name
通過class找所有對象(返回列表): find_elements_by_class_name
通過id找第一個對象: find_element_by_id
通過id找所有對象(返回列表): find_elements_by_id
通過標籤名(EX:div)找對象: find_element(s)_by_tag_name
操作元素:
點擊對象(通常用於button或a對象): WebElement對象.click()
輸入字符串: WebElement對象.send_keys()
獲取文本內容(介面上看的到的文本): WebElement對象.text
獲取元素屬性: WebElement對象.get_attribute(EX:'a')
獲取整個元素對應的HTML文本內容: WebElement對象.get_attribute('outerHTML')
獲取此元素内部所有的HTML文本內容: WebElement對象.get_attribute('innerHTML')
獲取輸入框裡面的文字(不能用.text獲取): WebElement對象.get_attribute('value')
獲取文本內容(介面上看不到的文本): WebElement對象.get_attribute('textContent'或'innerText')
CSS選擇器:
通過class找對象: find_element(s)_by_css_selector('.class值')
通過id找對象: find_element(s)_by_css_selector('#id值')
通過標籤名找對象: find_element(s)_by_css_selector('標籤名')
通過屬性找對象: find_element(s)_by_css_selector('[屬姓名]') EX: [href],也可以指定值[href="http://www.python3.vip/"]
通過屬性與其他方法混用:find_element(s)_by_css_selector('#id值[屬姓名]'或'.class值[屬姓名]'或'標籤名[屬姓名]')
獲取子元素對象: find_element(s)_by_css_selector('元素1 > 元素2') EX: #content > span 代表id=content的標籤中標籤名為span的子元素
獲取後代元素對象: find_element(s)_by_css_selector('元素1 元素2') EX: #content span 代表id=content的標籤中標籤名為span的後代元素
多個查詢加,號(代表或):find_element(s)_by_css_selector('#aa , div')
選擇第幾個子元素: find_element(s)_by_css_selector(':nth-child(數字)') EX: div:nth-child(2) 代表標簽名為div且剛好是在父元素中的第二子元素的標籤
選擇倒數第幾個子元素: find_element(s)_by_css_selector(':nth-last-child(數字)')
選擇第幾個div子元素: find_element(s)_by_css_selector('div:nth-of-type(數字)') EX:div:nth-of-type(2) 代表找出子元素中第二個標籤名為div的標籤
選擇倒數第幾個div子元素: find_element(s)_by_css_selector('div:nth-last-of-type(數字)')
選擇第奇數個子元素: find_element(s)_by_css_selector(':nth-child(odd)')
選擇第偶數個子元素: find_element(s)_by_css_selector(':nth-child(even)')
兄弟節點: find_element(s)_by_css_selector('元素1~元素2') EX: h3~span 代表找出兄弟節點中有h3的span標籤
相鄰節點: find_element(s)_by_css_selector('元素1+元素2') EX: h3+span 代表找出相鄰節點中有h3的span標籤
※ 在瀏覽器中也能透過CSS選擇器來進行查找(非常方便):
先按F12 -> 在上面一排選Elements -> 按 ctrl+f -> 輸入查詢條件
"""
# Returns a WebElement object (Google's search box, class 'gLFyf').
ele_search = wd.find_element_by_class_name('gLFyf')
# print(ele_search)
# Type the query into the search box, with a trailing Enter key (\n).
ele_search.send_keys('白月黑羽\n')
"""2. 現在在Google搜索「白月黑羽」的頁面"""
# On the results page: click the first result link.
div = wd.find_element_by_class_name('r')
div.find_element_by_tag_name('a').click()
"""3. 現在在「白月黑羽」首面"""
# Locate a nav item on the landing page via a CSS selector and print it.
t = wd.find_element_by_css_selector('.nav-item')
print(t)
# Close the browser driver and the browser.
# wd.quit()
|
[
"as124122323@gmail.com"
] |
as124122323@gmail.com
|
5f3faf4c4562e9b0e64d315ddcd5eee864cc3b85
|
3325f16c04ca8e641cbd58e396f983542b793091
|
/Seção 10 - Epressões Lambdas e Funções Integradas/any_all.py
|
e496ebcb6cf885ddd9c9be5e515223aa5b9bd253
|
[] |
no_license
|
romulovieira777/Programacao_em_Python_Essencial
|
ac929fbbd6a002bcc689b8d6e54d46177632c169
|
e81d219db773d562841203ea370bf4f098c4bd21
|
refs/heads/master
| 2023-06-11T16:06:36.971113
| 2021-07-06T20:57:25
| 2021-07-06T20:57:25
| 269,442,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
"""
Any e All
all() -> Retorna True se todos os elementos do iterável são verdadeiros ou ainda se o iterável está vazio.
# Exemplo all()
print(all([0, 1, 2, 3, 4])) # Todos os números são verdadeiros? False
print(all([1, 2, 3, 4])) # Todos os números são verdadeiros? True
print(all([])) # Todos os números são verdadeiro? True
print(all((1, 2, 3, 4))) # Todos os números são verdadeiros? True
print(all({1, 2, 3, 4})) # Todos os números são veradeiros? True
print(all('Geek')) # Todos os números são verdadeiros? True
nomes = ['Carlos', 'Camila', 'Carla', 'Cassiano', 'Cristina']
print(all([nome[0] == 'C' for nome in nomes]))
# OBS: Um iterável vazio convertido em boolean é False, mas o all() entende como True
print(all([letra for letra in 'eio' if letra in 'aeiou']))
print(all([num for num in [4, 2, 10, 6, 8] if num % 2 == 0]))
any() -> Retorna True se qualquer elemento do iterável for verdadeiro. Se o iterável estiver vazio, retorna False
print(any([0, 1, 2, 3, 4])) # True
print(any([0, False, {}, (), []])) # False
nomes = ['Carlos', 'Camila', 'Carla', 'Cassiano', 'Cristina', 'Vanessa']
print(any([nome[0] == 'C' for nome in nomes]))
print(any([num for num in [4, 2, 10, 6, 8, 9] if num % 2 == 0]))
"""
|
[
"romulo.vieira777@gmail.com"
] |
romulo.vieira777@gmail.com
|
29d497b0958f5306ca1af7ce54ce68796eaabfc5
|
0486b6ccf883e9cd7a24bbd89b5420e7de2172b9
|
/DRF Study Material/Django REST Code/gs40/gs40/settings.py
|
385459f2976108c2c7956f10aac8a7fd6bed0e5f
|
[] |
no_license
|
ajitexl/restfrmaework
|
2980203d7faa6c8364288283758d32c8f2a37817
|
9ab203748e623516365d9924dcc68acc786a66e1
|
refs/heads/main
| 2023-02-03T08:52:00.672047
| 2020-12-10T09:50:51
| 2020-12-10T09:50:51
| 320,222,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
"""
Django settings for gs40 project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#n6u1#xqh2!drsp9790#cbu5p4tms9p4sod=x(051^82j8a*w1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs40.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gs40.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"you@example.com"
] |
you@example.com
|
9a9dfa7493053bf7d54ab1b2f0a6907ca4e2210b
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-css/huaweicloudsdkcss/v1/model/show_cluster_volume_rsp.py
|
67727eed536ac36df3ddb9ef1537a0c6c9c17134
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,714
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowClusterVolumeRsp:
    """CSS cluster volume response model.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'type': 'str',
        'size': 'int'
    }

    attribute_map = {
        'type': 'type',
        'size': 'size'
    }

    def __init__(self, type=None, size=None):
        """ShowClusterVolumeRsp

        The model defined in huaweicloud sdk

        :param type: Disk type of the instance.
        :type type: str
        :param size: Disk size of the instance.
        :type size: int
        """
        self._type = None
        self._size = None
        self.discriminator = None
        # Only run the setters for values that were actually supplied.
        if type is not None:
            self.type = type
        if size is not None:
            self.size = size

    @property
    def type(self):
        """Gets the type of this ShowClusterVolumeRsp.

        Disk type of the instance.

        :return: The type of this ShowClusterVolumeRsp.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this ShowClusterVolumeRsp.

        Disk type of the instance.

        :param type: The type of this ShowClusterVolumeRsp.
        :type type: str
        """
        self._type = type

    @property
    def size(self):
        """Gets the size of this ShowClusterVolumeRsp.

        Disk size of the instance.

        :return: The size of this ShowClusterVolumeRsp.
        :rtype: int
        """
        return self._size

    @size.setter
    def size(self, size):
        """Sets the size of this ShowClusterVolumeRsp.

        Disk size of the instance.

        :param size: The size of this ShowClusterVolumeRsp.
        :type size: int
        """
        self._size = size

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(item):
            # Recurse into nested SDK models; pass plain values through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                # Mask sensitive scalar attributes.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, ShowClusterVolumeRsp) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.