blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3a7419d47d115b22113387f8fd86234986c771a1 | f4e8c8294b23fe070a763b527bc2f75ccdebaab1 | /leetCodePython2020/1474.delete-n-nodes-after-m-nodes-of-a-linked-list.py | 8136d6345fa1a7de4237a5a73ae5124dabab1e71 | [] | no_license | HOZH/leetCode | aff083b33e9b908490e7e992a0ad192ee31cc2e5 | a0ab59ba0a1a11a06b7086aa8f791293ec9c7139 | refs/heads/master | 2023-08-17T08:44:07.884861 | 2023-08-08T19:47:42 | 2023-08-08T19:47:42 | 136,558,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | #
# @lc app=leetcode id=1474 lang=python3
#
# [1474] Delete N Nodes After M Nodes of a Linked List
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteNodes(self, head: ListNode, m: int, n: int) -> ListNode:
        """LeetCode 1474: walk the list keeping m nodes, then unlinking the
        next n nodes, repeating until the list is exhausted.

        Returns the (possibly shortened) list starting at ``head``.
        """
        current = head
        # head itself counts as the first kept node, so only m-1 more
        # hops are needed before the first deletion.
        skip, delete = m-1, n
        flag = False
        while current:
            if skip:
                # Still inside a "keep" run: advance without modifying links.
                skip -= 1
                current = current.next
            else:
                # `current` is the last kept node; `temp` scans across the
                # n nodes to be dropped.
                temp = current.next
                if temp == None:
                    break
                while delete:
                    temp = temp.next
                    if temp == None:
                        # Ran off the end mid-deletion; finish the splice
                        # below, then stop.
                        flag = True
                        break
                    delete -= 1
                # Splice out the deleted run in one pointer assignment.
                current.next = temp
                # skip is m (not m-1) here because `current` has not yet
                # advanced onto the first node of the next kept run.
                skip = m
                delete = n
                if flag == True:
                    break
        return head
# @lc code=end
| [
"hozh96@gmail.com"
] | hozh96@gmail.com |
3e99fae9007287918e670bfe84e392c317a21be5 | 79c8b6f17f22dd6b72b7ab228e38539797b3d1e9 | /apps/prolongation/forms.py | 5b54802c3f43e80b704fae0f7e5bf41204925cd3 | [] | no_license | wd5/system | ac81e76413c620a225fdaff335f5c59f6ebf5bd0 | 26d8453965598e5b28bf2178c5cd01e637ac89b7 | refs/heads/master | 2021-01-17T22:07:50.130502 | 2013-01-22T14:37:08 | 2013-01-22T14:37:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | #encoding: utf-8
from django import forms
from models import UserProlongation, StatusChange
from django.contrib.admin import widgets
from vendors.mptt.fields import TreeNodeChoiceField
from common.forms import CoolModelForm
from participants.models import District
def get_districts_choices(with_empty_row=False):
    """Build select-widget choices for every district, labelled "<city>: <district>".

    When ``with_empty_row`` is true, a ``(0, u'----')`` placeholder is
    prepended so the widget can show an empty selection.
    """
    choices = [(0, u'----')] if with_empty_row else []
    choices.extend(
        (district.id, district.city.name + u': ' + district.name)
        for district in District.objects.select_related().all()
    )
    return choices
class UserProlongationForm(CoolModelForm):
    """Form for a user's request to extend a library loan.

    The user picks the district of the library, the current due date and
    the requested new due date (labels/help texts are user-facing Russian
    strings and must stay as-is).
    """
    # Choice list is built once at class-definition time with the empty
    # placeholder row enabled.
    district = forms.ChoiceField(choices=get_districts_choices(True), label=u'Выберите район',
        help_text=u'Нажмите на список, чтобы выбрать район, в котором находится библиотека')
    date_of_return = forms.DateField(
        label=u'Срок возврата (дд.мм.гггг)',widget=widgets.AdminDateWidget,
        help_text=u'Укажите дату, когда документ должен быть возвращен в библиотеку '
    )
    new_date_of_return = forms.DateField(
        label=u'Срок продления (дд.мм.гггг)',widget=widgets.AdminDateWidget,
        help_text=u'Укажите дату, до которой хотите продлить срок возврата. Не более, чем 30 дней со дня старого срока возврата'
    )
    class Meta:
        model = UserProlongation
        # Fields set programmatically by the view, not by the user.
        exclude = ('user','status', 'recive_library', 'manage_library')
class ChangeStatusForm(CoolModelForm):
    """Form for a manager's comment that accompanies a status change."""
    # Free-text comment shown to the user (label is user-facing Russian).
    comments = forms.CharField(widget=forms.Textarea, label=u'Комментарии пользователю', max_length=1024)
    class Meta:
        model = StatusChange
        # These fields are filled in by the handling code, not the form.
        exclude = ('prolongation','prolongation_manager', 'status', 'change_date', )
"dostovalov@gmail.com"
] | dostovalov@gmail.com |
3030d74ebb97635d2c611839c05ce641b45d3b5c | 3485a66a944aca21a0960c2f5a96c3b32ee24fb1 | /dlk/tests/onnx/test_onnx_io.py | 22535caa06b8d1af509641b77159947eb0b00d4d | [
"Apache-2.0"
] | permissive | tsawada/blueoil | 17733363f6bb4ab7c7d24571ea4e76a903aef378 | 745a2eb25e090e0ff9af547c1a11b538bf7e5c8a | refs/heads/master | 2020-06-14T02:15:03.640939 | 2019-08-20T01:43:28 | 2019-08-20T01:43:28 | 194,863,619 | 0 | 0 | Apache-2.0 | 2019-07-02T12:59:13 | 2019-07-02T12:59:13 | null | UTF-8 | Python | false | false | 3,995 | py | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test file for OnnxIO."""
from os import path
import unittest
from core.graph import Graph
def onnx_is_available() -> bool:
    """Return True when the ``onnx`` package can be imported."""
    try:
        __import__('onnx')
    except ImportError:
        return False
    return True
class TestOnnxIO(unittest.TestCase):
    """Test class for OnnxIO."""
    @unittest.skipUnless(onnx_is_available(), "ONNX is not available (reinstall with --enable-onnx)")
    def test_onnx_import(self) -> None:
        """Test code for importing ONNX file with OnnxIO."""
        from frontend.onnx import OnnxIO
        # NOTE(review): this test only builds a path and never calls
        # OnnxIO.read or asserts anything -- it looks truncated; confirm
        # against upstream history.
        onnx_path = path.join('examples',
                              'classification',
                              'lmnet_quantize_cifar10_stride_2.20180523.3x3')
    @unittest.skipUnless(onnx_is_available(), "ONNX is not available (reinstall with --enable-onnx)")
    def future_test_onnx_import_lmnet_classification(self) -> None:
        """Test code for importing lmnet classification via ONNX."""
        # Current lmnet classification onnx has several problems,
        # so we leave this test for the near future.
        from frontend.onnx import OnnxIO
        onnx_path = path.join('examples',
                              'classification',
                              'lmnet_quantize_cifar10_stride_2.20180523.3x3',
                              # 'lmnet_quantize_cifar10_max_pooling',
                              'onnx_minimal_graph_with_shape.pb')
        onnx_io = OnnxIO()
        # Reading the protobuf exercises the entire import pipeline.
        model = onnx_io.read(onnx_path)
        print("ONNX import test (using lmnet classification) passed!")
    @unittest.skipUnless(onnx_is_available(), "ONNX is not available (reinstall with --enable-onnx)")
    def test_onnx_import_pytorch_alexnet(self) -> None:
        """Test code for importing PyTorch AlexNet via ONNX."""
        from frontend.onnx import OnnxIO
        # install torch and torchvisiion anyway
        from pip._internal import main as pipmain
        # NOTE(review): installing packages at test time requires network
        # access and mutates the environment; consider declaring these as
        # test dependencies instead.
        pipmain(['install', 'torch', 'torchvision'])
        from torch.autograd import Variable
        import torch.onnx
        import torchvision
        # Batch of ten 3x224x224 inputs: the standard AlexNet geometry.
        dummy_input = Variable(torch.randn(10, 3, 224, 224))
        model = torchvision.models.alexnet(pretrained=True)
        # providing these is optional, but makes working with the
        # converted model nicer.
        input_names = ["learned_%d" % i for i in range(16)] + ["actual_input_1"]
        output_names = ["output1"]
        onnx_path = path.join('examples',
                              'classification',
                              'alexnet.pb')
        torch.onnx.export(model, dummy_input, onnx_path, input_names=input_names,
                          output_names=output_names)
        onnx_io = OnnxIO()
        model = onnx_io.read(onnx_path)
        # for debugging
        # json_path = path.join('examples',
        #                       'classification',
        #                       'alexnet.json')
        # model = onnx_io.read(onnx_path, json_path)
        graph: Graph = model.graph
        outputs = graph.get_outputs()
        # AlexNet exposes a single (batch=10, classes=1000) output.
        self.assertEqual(len(outputs), 1)
        self.assertEqual(outputs[0].shape, [10, 1000])
        print("ONNX import test (using PyTorch alexnet) passed!")
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| [
"matsuda@leapmind.io"
] | matsuda@leapmind.io |
69281b7f06729a2d89602751939ca08ebe0fe622 | 54e4c1a57765519c77d04fc02112c7f3bbacc595 | /prob_1329.py | 156b38e3a672f42a8f501938e3a886b26dae54e3 | [] | no_license | Hrishikesh-3459/leetCode | 80a864228a8a2ae41ac2623f970a13f409234eed | 42def57b8f70d179ca688314ae43747fc1e410a0 | refs/heads/master | 2023-05-07T01:37:19.375229 | 2021-05-25T01:58:05 | 2021-05-25T01:58:05 | 254,803,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | class Solution:
def diagonalSort(self, mat):
m = len(mat)
n = len(mat[0])
check = [[False for i in range(n)]for j in range(m)]
i = 0
j = 0
while self.check_if_over(check) == False and i < m and j < n:
row = i
col = j
cur = [mat[row][col]]
if check[row][col] == True:
j += 1
if j == n:
j = 0
i += 1
if i == m:
break
continue
check[row][col] = True
flag = True
while True:
row += 1
col += 1
if row < m and col < n:
cur.append(mat[row][col])
check[row][col] = True
else:
break
cur.sort()
row -= 1
col -= 1
while row >= i and col >= j and cur:
mat[row][col] = cur.pop()
row -= 1
col -= 1
j += 1
if j == n:
j = 0
i += 1
if i == m:
break
return mat
def check_if_over(self, check):
for i in check:
if False in i:
return False
return True
| [
"hrishikeshmm01@gmail.com"
] | hrishikeshmm01@gmail.com |
2906a35630c06f37c1402f6fa5d9902b007d01c7 | 14e7058adf766352a0b90b66b7dcf887105a481c | /portal/updates/templatetags/updates.py | ceed478aa39a25aae2c21e8a9a94ff5ba2b2496a | [
"BSD-2-Clause"
] | permissive | brunogamacatao/portalsaladeaula | 2b7f07f07c2518dd359f043483fbb27417f62aaf | 9429e485aa37ffea3208339a807032e9230a3c84 | refs/heads/master | 2020-12-29T01:42:18.594281 | 2012-06-22T12:24:44 | 2012-06-22T12:24:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,076 | py | from django import template
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType
from portal.updates.models import Update
register = template.Library()
class BaseUpdateNode(template.Node):
    """
    Base helper class (abstract) for handling the get_update_* template tags.
    Looks a bit strange, but the subclasses below should make this a bit more
    obvious.
    """
    # Pre-decorator style: handle_token/lookup_content_type are wrapped via
    # the classmethod()/staticmethod() calls below their definitions.
    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse get_update_list/count/form and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
        # {% get_whatever for obj as varname %}
        if len(tokens) == 5:
            if tokens[3] != 'as':
                raise template.TemplateSyntaxError("Third argument in %r must be 'as'" % tokens[0])
            return cls(
                object_expr = parser.compile_filter(tokens[2]),
                as_varname = tokens[4],
            )
        # {% get_whatever for app.model pk as varname %}
        elif len(tokens) == 6:
            if tokens[4] != 'as':
                raise template.TemplateSyntaxError("Fourth argument in %r must be 'as'" % tokens[0])
            return cls(
                ctype = BaseUpdateNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3]),
                as_varname = tokens[5]
            )
        else:
            raise template.TemplateSyntaxError("%r tag requires 4 or 5 arguments" % tokens[0])
    handle_token = classmethod(handle_token)
    #@staticmethod
    def lookup_content_type(token, tagname):
        # Resolve an 'app.model' string to a ContentType, with tag-friendly
        # error messages for bad formats or unknown models.
        try:
            app, model = token.split('.')
            return ContentType.objects.get(app_label=app, model=model)
        except ValueError:
            raise template.TemplateSyntaxError("Third argument in %r must be in the format 'app.model'" % tagname)
        except ContentType.DoesNotExist:
            raise template.TemplateSyntaxError("%r tag has non-existant content-type: '%s.%s'" % (tagname, app, model))
    lookup_content_type = staticmethod(lookup_content_type)
    def __init__(self, ctype=None, object_pk_expr=None, object_expr=None, as_varname=None, update=None):
        # Either a literal object expression or a (content type, pk) pair
        # must be supplied; everything else is stored for render time.
        if ctype is None and object_expr is None:
            raise template.TemplateSyntaxError("Update nodes must be given either a literal object or a ctype and object pk.")
        self.update_model = Update
        self.as_varname = as_varname
        self.ctype = ctype
        self.object_pk_expr = object_pk_expr
        self.object_expr = object_expr
        self.update = update
    def render(self, context):
        # Nodes that only populate a context variable render as ''.
        qs = self.get_query_set(context)
        context[self.as_varname] = self.get_context_value_from_queryset(context, qs)
        return ''
    def get_query_set(self, context):
        # NOTE(review): only the object-expression path is implemented; the
        # ctype/pk variant accepted by handle_token is ignored here (the old
        # ContentType-based lookup remains commented out below) -- confirm
        # whether the app.model syntax is still expected to work.
        obj = self.object_expr.resolve(context)
        return obj.get_update_list()
#        ctype, object_pk = self.get_target_ctype_pk(context)
#        if not object_pk:
#            return self.update_model.objects.none()
#
#        qs = self.update_model.objects.filter(
#            content_type = ctype,
#            object_pk = smart_unicode(object_pk),
#        )
#
#        return qs
    def get_target_ctype_pk(self, context):
        # Normalise both tag syntaxes into a (ContentType, pk) pair;
        # returns (None, None) when the object variable cannot be resolved.
        if self.object_expr:
            try:
                obj = self.object_expr.resolve(context)
            except template.VariableDoesNotExist:
                return None, None
            return ContentType.objects.get_for_model(obj), obj.pk
        else:
            return self.ctype, self.object_pk_expr.resolve(context, ignore_failures=True)
    def get_context_value_from_queryset(self, context, qs):
        """Subclasses should override this."""
        raise NotImplementedError
class UpdateListNode(BaseUpdateNode):
    """Populate the template context with the full list of updates."""
    def get_context_value_from_queryset(self, context, qs):
        # Force evaluation so templates receive a plain list object.
        return [entry for entry in qs]
class UpdateCountNode(BaseUpdateNode):
    """Insert a count of updates into the context."""
    def get_context_value_from_queryset(self, context, qs):
        # len() evaluates the sequence returned by get_query_set.
        return len(qs)
class RenderUpdateListNode(UpdateListNode):
    """Render the update list directly"""
    #@classmethod
    def handle_token(cls, parser, token):
        """Class method to parse render_update_list and return a Node."""
        tokens = token.contents.split()
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'for'" % tokens[0])
        # {% render_update_list for obj %}
        if len(tokens) == 3:
            return cls(object_expr=parser.compile_filter(tokens[2]))
        # {% render_update_list for app.models pk %}
        elif len(tokens) == 4:
            return cls(
                ctype = BaseUpdateNode.lookup_content_type(tokens[2], tokens[0]),
                object_pk_expr = parser.compile_filter(tokens[3])
            )
        # NOTE(review): unlike the base class, other token counts fall
        # through and return None instead of raising -- confirm intent.
    handle_token = classmethod(handle_token)
    def render(self, context):
        ctype, object_pk = self.get_target_ctype_pk(context)
        if object_pk:
            obj = self.object_expr.resolve(context)
            # Serve the cached rendering when present; otherwise render the
            # most specific matching template and persist the result on the
            # object (side effect: obj.save()).
            if obj.updates_cache:
                return obj.updates_cache
            else:
                # Most-specific template first: per-model, per-app, generic.
                template_search_list = [
                    "updates/%s/%s/list.html" % (ctype.app_label, ctype.model),
                    "updates/%s/list.html" % ctype.app_label,
                    "updates/list.html"
                ]
                qs = self.get_query_set(context)
                context.push()
                liststr = render_to_string(template_search_list, {
                    "update_list" : self.get_context_value_from_queryset(context, qs)
                }, context)
                context.pop()
                obj.updates_cache = liststr
                obj.save()
                return liststr
        else:
            return ''
# We could just register each classmethod directly, but then we'd lose out on
# the automagic docstrings-into-admin-docs tricks. So each node gets a cute
# wrapper function that just exists to hold the docstring.
#@register.tag
def get_update_count(parser, token):
    """
    Gets the update count for the given params and populates the template
    context with a variable containing that value, whose name is defined by the
    'as' clause.
    Syntax::
        {% get_update_count for [object] as [varname] %}
        {% get_update_count for [app].[model] [object_id] as [varname] %}
    Example usage::
        {% get_update_count for event as update_count %}
        {% get_update_count for calendar.event event.id as update_count %}
        {% get_update_count for calendar.event 17 as update_count %}
    """
    # All parsing is shared with the other tags via the Node classmethod.
    return UpdateCountNode.handle_token(parser, token)
#@register.tag
def get_update_list(parser, token):
    """
    Gets the list of updates for the given params and populates the template
    context with a variable containing that value, whose name is defined by the
    'as' clause.
    Syntax::
        {% get_update_list for [object] as [varname] %}
        {% get_update_list for [app].[model] [object_id] as [varname] %}
    Example usage::
        {% get_update_list for event as update_list %}
        {% for update in update_list %}
            ...
        {% endfor %}
    """
    # All parsing is shared with the other tags via the Node classmethod.
    return UpdateListNode.handle_token(parser, token)
#@register.tag
def render_update_list(parser, token):
    """
    Render the update list (as returned by ``{% get_update_list %}``)
    through the ``updates/list.html`` template
    Syntax::
        {% render_update_list for [object] %}
        {% render_update_list for [app].[model] [object_id] %}
    Example usage::
        {% render_update_list for event %}
    """
    # All parsing is shared with the other tags via the Node classmethod.
    return RenderUpdateListNode.handle_token(parser, token)
# Register the wrapper functions (rather than the Node classmethods) so
# their docstrings surface in Django's admin documentation.
register.tag(get_update_count)
register.tag(get_update_list)
register.tag(render_update_list)
"brunogamacatao@gmail.com"
] | brunogamacatao@gmail.com |
da4babe938a55d7fd72c4bfc6e80eedc4bc56c01 | ea7bcf81da6456260ce1a808a13d9d78efc2a897 | /pyacoustics/speech_detection/common.py | 44afaf9218810c7fdbe436b3bbaf5bdeec56daa3 | [
"MIT"
] | permissive | timmahrt/pyAcoustics | 912fdf6dc8f445387246cbee4855b1338f370b6a | c778e4ada301f420a71bf9f6d4b51beccccaecde | refs/heads/main | 2023-07-19T22:14:21.324789 | 2023-07-15T16:23:48 | 2023-07-15T16:23:48 | 37,000,171 | 79 | 16 | NOASSERTION | 2023-07-15T16:11:53 | 2015-06-07T01:09:28 | Python | UTF-8 | Python | false | false | 3,386 | py | """
Created on Jun 7, 2015
@author: tmahrt
"""
import struct
import wave
import math
from pyacoustics.signals import audio_scripts
class EndOfAudioData(Exception):
    """Raised when a read reaches the end of the audio stream."""
def getSoundFileDuration(fn):
    """
    Returns the duration of a wav file (in seconds)
    """
    audiofile = wave.open(fn, "r")
    try:
        # getparams() -> (nchannels, sampwidth, framerate, nframes, ...)
        params = audiofile.getparams()
        framerate = params[2]
        nframes = params[3]
    finally:
        # Bug fix: the original never closed the reader, leaking the
        # file handle on every call.
        audiofile.close()
    duration = float(nframes) / framerate
    return duration
def openAudioFile(fn):
    """Open *fn* for reading; return (wave_reader, sample_width, framerate).

    The caller owns the returned reader and is responsible for closing it.
    """
    audiofile = wave.open(fn, "r")
    params = audiofile.getparams()
    # params layout: (nchannels, sampwidth, framerate, nframes, ...)
    return audiofile, params[1], params[2]
def rms(audioFrameList):
    """Return the root-mean-square energy of a sequence of samples."""
    squared = [sample * sample for sample in audioFrameList]
    return math.sqrt(sum(squared) / len(squared))
def overlapCheck(interval, cmprInterval, percentThreshold=0):
    """Return True when the two (start, end) intervals overlap.

    With a positive ``percentThreshold``, the overlap must additionally
    cover at least that fraction of the combined span of both intervals.
    """
    start, end = interval[0], interval[1]
    otherStart, otherEnd = cmprInterval[0], cmprInterval[1]

    overlap = max(0, min(end, otherEnd) - max(start, otherStart))
    if overlap <= 0:
        return False
    if percentThreshold > 0:
        combinedSpan = max(end, otherEnd) - min(start, otherStart)
        return overlap / float(combinedSpan) >= percentThreshold
    return True
def getMinMaxAmplitude(wavFN, stepSize, entryList=None):
    """Return the (min, max) RMS energy over stepSize-second windows.

    entryList is an optional list of (start, stop) ranges in seconds; by
    default the whole file is scanned.
    """
    # NOTE(review): the reader is never closed, and frames are always read
    # sequentially from the file start -- entry start times only bound how
    # many windows are read, not where reading begins; confirm intent.
    audiofile = openAudioFile(wavFN)[0]
    # By default, find the min and max amplitude for the whole file
    if entryList is None:
        stop = audio_scripts.getSoundFileDuration(wavFN)
        entryList = [
            (0, stop),
        ]
    # Accumulate relevant energy values
    rmsList = []
    for entry in entryList:
        start, stop = entry[0], entry[1]
        currentTime = start
        while currentTime < stop:
            rmsList.append(rmsNextFrames(audiofile, stepSize))
            currentTime += stepSize
    # Return the min and max values
    minValue = min(rmsList)
    maxValue = max(rmsList)
    return minValue, maxValue
def rmsNextFrames(audiofile, stepSize, normMinVal=None, normMaxVal=None):
    """Read the next stepSize seconds from an open wave reader and return
    the RMS energy, optionally (min, max)-normalised.

    Raises EndOfAudioData when no frames remain.
    """
    params = audiofile.getparams()
    sampwidth, framerate = params[1], params[2]
    numFrames = int(framerate * stepSize)
    waveData = audiofile.readframes(numFrames)
    if len(waveData) == 0:
        raise EndOfAudioData()
    actualNumFrames = int(len(waveData) / float(sampwidth))
    # Little-endian signed 16-bit samples; assumes sampwidth == 2 and mono
    # audio -- TODO confirm against the files actually processed.
    audioFrameList = struct.unpack("<" + "h" * actualNumFrames, waveData)
    rmsEnergy = rms(audioFrameList)
    if normMinVal is not None and normMaxVal is not None:
        rmsEnergy = (rmsEnergy - normMinVal) / (normMaxVal - normMinVal)
    return rmsEnergy
def mergeAdjacentEntries(entryList):
    """Collapse runs of entries where one ends exactly where the next begins.

    The list is modified in place and also returned; chains of adjacent
    entries merge into a single span.
    """
    i = 0
    while i < len(entryList) - 1:
        current = entryList[i]
        following = entryList[i + 1]
        if current[1] == following[0]:
            # Replace the pair with one merged span and re-test the same
            # index so longer chains keep collapsing.
            entryList[i:i + 2] = [(current[0], following[1])]
        else:
            i += 1
    return entryList
def cropUnusedPortion(entry, start, stop):
    """Return the parts of the (start, end) *entry* outside [start, stop]."""
    leftovers = []
    entryStart, entryEnd = entry[0], entry[1]
    if entryStart < start:
        leftovers.append((entryStart, start))
    if entryEnd > stop:
        leftovers.append((stop, entryEnd))
    return leftovers
| [
"timmahrt@gmail.com"
] | timmahrt@gmail.com |
5db3a8987722578fed28235ac67dbfe166ee6202 | 1d9ab83a59139f1b59f20451146499c7f2c1fe2e | /00web框架的本质/14uimethod/start.py | 5fdc2a15bc150d454d9ef23eb2bba57f6a4073f7 | [] | no_license | lannyMa/tornado_info | df1361d9ee5ec418cb3cb7681cb45a0f88560466 | fe2c4dfb6d882f5ce750322bcd496c8484452da1 | refs/heads/master | 2021-01-23T06:06:04.913158 | 2017-09-15T02:55:20 | 2017-09-15T02:55:20 | 102,489,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | #!/usr/bin/env python
# coding=utf-8
import tornado.web
import uimethod as mt
# Business-logic module: in-memory store of submitted names (not persisted;
# shared across requests within this process).
INPUT_LIST=[]
class MainHandler(tornado.web.RequestHandler):
    """Render the toy BBS page; POST appends a name to INPUT_LIST."""
    def get(self):
        # self.write("Hello, world")
        # self.render("bbs.html",names=INPUT_LIST)
        self.render("bbs.html", npm="NPM", names=INPUT_LIST)
    def post(self,*args,**kwargs):
        # Store the submitted name in the module-level list, then re-render.
        name = self.get_argument("name")
        INPUT_LIST.append(name)
        self.render("bbs.html", npm="NPM", names=INPUT_LIST)
# Configuration module: template/static locations plus UI-method helpers.
settings = {
    "template_path":"templates",
    "static_path":"statics",
    "ui_methods": mt,
    }
# Routing module: expose the handler at /index.
application = tornado.web.Application([
    (r"/index", MainHandler)],
    **settings)
## Entry point (WSGI-style bootstrap).
if __name__ == "__main__":
    print("http://127.0.0.1:8888/index")
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
| [
"iher@foxmail.com"
] | iher@foxmail.com |
b2c8628e08de3b25e4a2efe061348b105be899b3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02572/s626980597.py | c8d152ab3d681e7b7eebb229e3f9202f9a0a2aee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | KK=10 ** 9 + 7
# Sum of A[i] * A[j] over all pairs i < j, reported modulo KK (10**9 + 7).
N = int(input())  # element count; the line must be consumed even though len(A) would do
A = list(map(int, input().split()))
suffix_total = sum(A) % KK
answer = 0
for value in A:
    # After this subtraction, suffix_total is the sum of elements after `value`
    # (possibly negative; Python's % normalises it below).
    suffix_total -= value
    answer = (answer + suffix_total * value) % KK
print(answer)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
09c8172121ce98de464c378338ba814ab6f1d833 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/collective/test_c_identity.py | 6f034bf16414726f8fdb352d7260b5f7d4e1f121 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 964 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test_collective_base import TestDistBase
import paddle
paddle.enable_static()  # collective ops below require static-graph mode
# Distributed-collective smoke test: runs collective_identity_op.py across
# ranks via the TestDistBase harness and checks the results.
class TestIdentityOp(TestDistBase):
    def _setup_config(self):
        # No extra cluster configuration is needed for the identity op.
        pass
    def test_identity(self, col_type="identity"):
        # col_type selects the collective kernel exercised by the helper
        # script; "identity" passes each rank's input through unchanged.
        self.check_with_place("collective_identity_op.py", col_type)
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
4378b43ae05e94906677124d1d5580789abb3a72 | e7dc7b84a31aca9bf2ca5e48a7813dec3d1fe817 | /utils.py | 3561afbba43bd164f4a31b09bbf8ea62cfdcedde | [] | no_license | BerenMillidge/PredictiveCodingDynamicalWeights | 8a17df1b162471b88963abb585fe976306209cc1 | 150cdef32ab2d5aeecc163195082b7c0f2c48dc2 | refs/heads/master | 2022-08-29T04:10:33.411811 | 2020-05-30T16:44:24 | 2020-05-30T16:44:24 | 268,092,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,512 | py |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.distributions as dist
from copy import deepcopy
import math
import matplotlib.pyplot as plt
# NOTE(review): `global` at module scope is a no-op; DEVICE is simply a
# module-level constant -- CUDA when available, otherwise CPU.
global DEVICE
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def boolcheck(x):
    """Loosely interpret *x* as a boolean: "true"/"1"/"yes" (any case) -> True."""
    truthy_strings = ["true", "1", "yes"]
    return str(x).lower() in truthy_strings
def set_tensor(xs):
    # Cast to float32 and move onto the module-level DEVICE (GPU if available).
    return xs.float().to(DEVICE)
def edge_zero_pad(img, d):
    """Zero-pad an NCHW image batch by *d* pixels on every spatial edge."""
    batch, channels, height, width = img.shape
    padded = torch.zeros((batch, channels, height + (d * 2), width + (d * 2))).to(DEVICE)
    # Copy the original image into the centre of the zero canvas.
    padded[:, :, d:height + d, d:width + d] = img
    return padded
def accuracy(out, L):
B,l = out.shape
total = 0
for i in range(B):
if torch.argmax(out[i,:]) == torch.argmax(L[i,:]):
total +=1
return total/ B
def sequence_accuracy(model, target_batch):
    """Per-timestep, per-batch argmax match rate between model.mu_y and targets.

    Assumes model.mu_y and target_batch are equal-length sequences of
    (vocab, batch) tensors -- TODO confirm against callers.
    """
    accuracy = 0
    L = len(target_batch)
    _,B = target_batch[0].shape
    s = ""
    for i in range(len(target_batch)): # this loop is over the seq_len
        # Build a "pred target" debug trace for batch element 0 only.
        s += str(torch.argmax(model.mu_y[i][:,0]).item()) + " " + str(torch.argmax(target_batch[i][:,0]).item()) + " "
        for b in range(B):
            #print("target idx: ", torch.argmax(target_batch[i][:,b]).item())
            #print("pred idx: ", torch.argmax(model.mu_y[i][:,b]).item())
            if torch.argmax(target_batch[i][:,b]) ==torch.argmax(model.mu_y[i][:,b]):
                accuracy+=1
    print("accs: ", s)
    return accuracy / (L * B)
def custom_onehot(idx, shape):
    """Return a zero tensor of *shape* with position *idx* set to one."""
    onehot_vector = set_tensor(torch.zeros(shape))
    onehot_vector[idx] = 1
    return onehot_vector
def onehot(arr, vocab_size):
    """One-hot encode an (L, B) index array into shape (L, vocab_size, B).

    Args:
        arr: (seq_len, batch) array of integer token indices.
        vocab_size: number of classes along the one-hot axis.

    Returns:
        float64 numpy array of shape (seq_len, vocab_size, batch).
    """
    # Vectorised: np.eye row-lookup yields (L, B, vocab), transposed to the
    # (L, vocab, B) layout callers expect. astype(int) truncates toward
    # zero exactly like the original per-element int() cast.
    indices = np.asarray(arr).astype(int)
    return np.transpose(np.eye(vocab_size)[indices], (0, 2, 1))
def inverse_list_onehot(arr):
    """Decode a list of (V, B) one-hot arrays into an (L, B) index array.

    If a column contains several ones the highest index wins (matching the
    scan order); all-zero columns decode to 0.
    """
    seq_len = len(arr)
    vocab, batch = arr[0].shape
    decoded = np.zeros([seq_len, batch])
    for step, frame in enumerate(arr):
        for b in range(batch):
            for v in range(vocab):
                if frame[v, b] == 1:
                    decoded[step, b] = v
    return decoded
def decode_ypreds(ypreds):
    """Decode a list of (V, B) score tensors into an (L, B) argmax array."""
    seq_len = len(ypreds)
    vocab, batch = ypreds[0].shape
    decoded = np.zeros([seq_len, batch])
    for step, scores in enumerate(ypreds):
        for b in range(batch):
            # Highest-scoring vocabulary entry for this batch column.
            decoded[step, b] = torch.argmax(scores[:, b])
    return decoded
def inverse_onehot(arr):
    """Decode one-hot data back to integer indices.

    Accepts either a list of (V, B) arrays or a single (L, V, B) array and
    returns an (L, B) array of indices. The highest matching index wins for
    multi-hot columns; all-zero columns decode to 0.
    """
    # isinstance instead of the original `type(arr) == list` anti-pattern
    # (also accepts list subclasses).
    if isinstance(arr, list):
        return inverse_list_onehot(arr)
    seq_len, vocab, batch = arr.shape
    decoded = np.zeros([seq_len, batch])
    for l in range(seq_len):
        for b in range(batch):
            for v in range(vocab):
                if arr[l, v, b] == 1:
                    decoded[l, b] = v
    return decoded
# Activation functions and their derivatives (all elementwise).


def tanh(xs):
    """Elementwise hyperbolic tangent."""
    return torch.tanh(xs)


def linear(x):
    """Identity activation."""
    return x


def tanh_deriv(xs):
    """Derivative of tanh: 1 - tanh(x)**2."""
    return 1.0 - torch.tanh(xs) ** 2.0


def linear_deriv(x):
    """Derivative of the identity map: a broadcastable tensor of ones."""
    return set_tensor(torch.ones((1,)))


def relu(xs):
    """Elementwise rectified linear unit."""
    return torch.clamp(xs, min=0)


def relu_deriv(xs):
    """Derivative of relu: 1 where x > 0, else 0."""
    rel = relu(xs)
    rel[rel > 0] = 1
    return rel


def softmax(xs):
    """Softmax over the last dimension.

    Bug fix: the original called ``torch.nn.softmax``, which does not
    exist and always raised AttributeError. ``F.softmax`` over the last
    dim is the assumed intent -- confirm against callers.
    """
    return F.softmax(xs, dim=-1)


def sigmoid(xs):
    """Elementwise logistic sigmoid."""
    return F.sigmoid(xs)


def sigmoid_deriv(xs):
    """Derivative of sigmoid: s(x) * (1 - s(x))."""
    return F.sigmoid(xs) * (torch.ones_like(xs) - F.sigmoid(xs))
## initializations functions


def gaussian_init(W, mean=0.0, std=0.05):
    """In-place normal initialisation of W.

    Bug fix: the original ignored its ``mean``/``std`` arguments and always
    drew from N(0, 0.05); they are now honoured (defaults unchanged).
    """
    return W.normal_(mean=mean, std=std)


def zeros_init(W):
    """Return a zero tensor with W's shape/dtype (W itself is untouched)."""
    return torch.zeros_like(W)


def kaiming_init(W, a=math.sqrt(5), *kwargs):
    """In-place Kaiming-uniform initialisation (extra positionals ignored)."""
    return init.kaiming_uniform_(W, a)


def glorot_init(W):
    """In-place Xavier-normal initialisation (requires >= 2-D tensors)."""
    return init.xavier_normal_(W)


def kaiming_bias_init(b, *kwargs):
    """Bias init matching PyTorch's Linear layer.

    FIXME(review): this references ``self.weight`` with no ``self`` in
    scope and therefore raises NameError whenever called, exactly as in
    the original. The companion weight tensor must be passed in for this
    to work -- left unfixed pending confirmation of the intended API.
    """
    fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
    bound = 1 / math.sqrt(fan_in)
    return init.uniform_(b, -bound, bound)


# the initialization pytorch uses for lstm
def std_uniform_init(W, hidden_size):
    """In-place U(-1/sqrt(h), 1/sqrt(h)) initialisation (LSTM default)."""
    stdv = 1.0 / math.sqrt(hidden_size)
    return init.uniform_(W, -stdv, stdv)
| [
"noreply@github.com"
] | BerenMillidge.noreply@github.com |
1d223a717acce32faf636829503f6a4037161c65 | ed865aed525556fd7aa5ac5a024af720de8438e3 | /tests/integration-tests/__init__.py | 221b7a2ecaedf18bc52002036cbeb1b3d1069a38 | [
"Python-2.0",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT-0",
"BSD-2-Clause"
] | permissive | aws/aws-parallelcluster | 7bb33a6e175168f63a1e0acb1a9a7e9cbc405eff | a213978a09ea7fc80855bf55c539861ea95259f9 | refs/heads/develop | 2023-09-05T15:12:18.533270 | 2023-09-05T14:38:59 | 2023-09-05T14:38:59 | 19,718,034 | 520 | 226 | Apache-2.0 | 2023-09-14T15:56:30 | 2014-05-12T22:42:19 | Python | UTF-8 | Python | false | false | 554 | py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
| [
"demartinof@icloud.com"
] | demartinof@icloud.com |
41f5a6c0a06987569d79b51f13b83e6c38328a56 | 4131a31723394b4f075e1c8170d4b1c30b4d4920 | /celescope/tag/analysis_tag.py | 126802850fda6cc2c223bee621a0cde75e48fb33 | [
"MIT"
] | permissive | Yixf-Self/CeleScope | 8be4ebf7236923316ff7bed4690385f96834b5c8 | e89525b16c92ded5b024822ba1286cdf10296d91 | refs/heads/master | 2023-02-02T20:24:31.174786 | 2020-12-16T05:46:22 | 2020-12-16T05:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import argparse
from .Analysis_tag import Analysis_tag
def get_opts_analysis_tag(parser, sub_program):
    """Register the analysis_tag command-line options on *parser*.

    Output-location options are only registered when running as a
    sub-program; the remaining options are always required.
    """
    if sub_program:
        for flag, description in (('--outdir', 'output dir'),
                                  ('--sample', 'sample name')):
            parser.add_argument(flag, help=description, required=True)
    for flag, description in (('--match_dir', 'match_dir'),
                              ('--tsne_tag_file', 'tsne tag file'),
                              ('--assay', 'assay')):
        parser.add_argument(flag, help=description, required=True)
def analysis_tag(args):
    """Entry point: build an Analysis_tag runner from the parsed CLI args
    and run it on the t-SNE tag file."""
    analysis_tag_object = Analysis_tag(
        args.sample,
        args.outdir,
        args.assay,
        args.match_dir,
        'analysis_tag',  # step name used by the runner
    )
    analysis_tag_object.run(args.tsne_tag_file)
"zhouyiqi@singleronbio.com"
] | zhouyiqi@singleronbio.com |
81eb0e802c74a649123d6a272566234d6ca31607 | 4522fc52bc43654aadd30421a75bae00a09044f0 | /isis/decimal_edit.py | 481c403ed9b9486d501323a219685cc2a005361d | [] | no_license | qesoalpe/anelys | 1edb8201aa80fedf0316db973da3a58b67070fca | cfccaa1bf5175827794da451a9408a26cd97599d | refs/heads/master | 2020-04-07T22:39:35.344954 | 2018-11-25T05:23:21 | 2018-11-25T05:23:21 | 158,779,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | from PySide2.QtWidgets import QDoubleSpinBox
from isis.event import Event
from decimal import Decimal as D
class Decimal_Edit(QDoubleSpinBox):
    """Spin box exposing its value as a decimal.Decimal, with custom
    key_down / value_changed Event hooks."""
    def __init__(self, *args, **kwargs):
        QDoubleSpinBox.__init__(self, *args, **kwargs)
        self.setDecimals(2)
        self.setButtonSymbols(self.NoButtons)
        self.key_down = Event()
        self.value_changed = Event()
        # Re-publish Qt's valueChanged through the custom Event, passing
        # the Decimal-typed `value` property instead of the raw float.
        def handle_value_changed(*args):
            self.value_changed(self.value)
        self.valueChanged.connect(handle_value_changed)
    @property
    def value(self):
        # NOTE(review): QDoubleSpinBox.decimals is a bound method (hence
        # callable), so unless an instance attribute shadows it this always
        # rounds to 6 places rather than the widget's decimals() setting --
        # confirm intent.
        return round(D(QDoubleSpinBox.value(self)), self.decimals if hasattr(self, 'decimals') and not callable(self.decimals) else 6)
    @value.setter
    def value(self, value):
        # None is treated as zero rather than raising.
        if value is None:
            self.setValue(0)
        else:
            self.setValue(value)
    @property
    def maximum(self):
        return QDoubleSpinBox.maximum(self)
    @maximum.setter
    def maximum(self, maximum):
        self.setMaximum(maximum)
    @property
    def minimum(self):
        return QDoubleSpinBox.minimum(self)
    @minimum.setter
    def minimum(self, minimum):
        self.setMinimum(minimum)
    def keyPressEvent(self, event):
        # Forward to Qt first, then notify custom listeners.
        QDoubleSpinBox.keyPressEvent(self, event)
        self.key_down(event)
    # Alias so callers can use .focus() in place of setFocus().
    focus = QDoubleSpinBox.setFocus
"qesoalpe@gmail.com"
] | qesoalpe@gmail.com |
50c1c15e2b1d65358bef75285df605e0c6f08b8b | 25e15f9f7183a16c18654937f517772f2fa0dd0d | /src/actions/run-tempest.py | 94bb5e7d5fd67ec475314dfa64dc910d102a1aeb | [
"Apache-2.0"
] | permissive | junaid-ali/charm-tempest | 956117bf1ca113bbc4e83a9ee56975913a9b79d2 | f1d83e0d2fa68e3baac700831c320f44c30d9f03 | refs/heads/master | 2021-01-10T22:46:00.583771 | 2016-10-05T22:45:56 | 2016-10-05T22:45:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | #!/usr/bin/env python3
import sys
sys.path.append('lib')  # charm helper libraries are vendored under ./lib
import charm.openstack.tempest as tempest
import charms.reactive as reactive
if __name__ == '__main__':
    # Cloud may have different artifacts (flavors, images etc) since last run
    # so rerun handlers file to regenerate config.
    reactive.main()
    # Run only tempest's quick "smoke" suite.
    tempest.run_test('smoke')
| [
"liam.young@canonical.com"
] | liam.young@canonical.com |
c1c318540bca5cdd58745276e028cd0945716940 | ed37a985a7411fb3b8f29282a81f1d823f8f4afc | /pascal_triangle/tests/test_pascal_triangle.py | 97e72f007bbdce96f05c1ede77270cbe78d69329 | [] | no_license | dmugtasimov/pascal_triangle | 5b310451582f6fc2ddc74f316259c6ec9fc4ec4b | 875deac43300a42560f0433a92e5f1e0475bb754 | refs/heads/master | 2021-06-16T10:55:11.338999 | 2017-04-11T17:20:54 | 2017-04-11T17:20:54 | 35,548,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import unittest
from pascal_triangle.implementations import ALL_IMPLEMENTATIONS
def test_method_template(implementation_class):
TRI_HEIGHT = 5
TRI_HEIGHT_SUM = 2 ** TRI_HEIGHT
def test_method(self):
result = implementation_class(return_list=True).build(TRI_HEIGHT)
self.assertEquals(sum(result), TRI_HEIGHT_SUM)
return test_method
class _TestPascalTriangleMeta(type):
    """Metaclass that injects one generated test method per implementation."""
    def __new__(mcs, name, bases, attrs):
        # Build all the extra test methods first, then merge them in one go.
        generated = {
            'test_' + impl.__name__: test_method_template(impl)
            for impl in ALL_IMPLEMENTATIONS
        }
        attrs.update(generated)
        return super(_TestPascalTriangleMeta, mcs).__new__(mcs, name, bases, attrs)
class TestPascalTriangle(unittest.TestCase):
    # NOTE(review): `__metaclass__` is the Python 2 metaclass hook; under
    # Python 3 the metaclass must be passed as `metaclass=` in the class
    # header, so the generated tests would not be attached there -- confirm
    # which interpreter this suite targets.
    __metaclass__ = _TestPascalTriangleMeta
| [
"dmugtasimov@gmail.com"
] | dmugtasimov@gmail.com |
9c4e17a756d3de453a7f7426eb35e933be498384 | 644b13f90d43e9eb2fae0d2dc580c7484b4c931b | /2019 baekjoon/GreedyAlgorithm/12904_A&B.py | 039e8f50f9c55d9c86fcae7db31582806dab294d | [] | no_license | yeonnseok/ps-algorithm | c79a41f132c8016655719f74e9e224c0870a8f75 | fc9d52b42385916344bdd923a7eb3839a3233f18 | refs/heads/master | 2020-07-09T11:53:55.786001 | 2020-01-26T02:27:09 | 2020-01-26T02:27:09 | 203,962,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | s = list(input())
t = list(input())  # target string T, read as a list of characters
def flip(a):
    """Return the reverse of list *a*.

    Matches the original pop-based implementation exactly: *a* is emptied
    as a side effect and a new reversed list is returned.
    """
    reversed_items = a[::-1]
    del a[:]
    return reversed_items
# Work backwards from T: each forward operation was either "append 'A'" or
# "append 'B' then reverse", so undo the last character until lengths match.
# NOTE(review): assumes t consists only of 'A'/'B' and is never shorter than
# s (guaranteed by the BOJ 12904 input constraints).
while len(t) != len(s):
    if t[-1] == 'A':
        t.pop()
    elif t[-1] == 'B':
        t.pop()
        t = flip(t)
# 1 if S can be transformed into T, 0 otherwise.
if t == s:
    print(1)
else:
    print(0)
| [
"smr603@snu.ac.kr"
] | smr603@snu.ac.kr |
62a6fa57c9d7c5e1b9220399de7f89106a2421c9 | 2ffd079c34cb07c738f7e5f703764fed68f2c8c0 | /Solutions/Boats_to_Save_People.py | 8457f5f14562cfa073a2ca26ba0fb6ac492d2b34 | [] | no_license | WuIFan/LeetCode | bc96355022c875bdffb39c89a2088457b97d30ab | 689a100ada757bc20334d5f0084587af3039ca7b | refs/heads/master | 2022-05-24T07:13:01.023733 | 2022-04-03T15:26:23 | 2022-04-03T15:26:23 | 202,471,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from typing import List
class Solution:
    def numRescueBoats(self, people: List[int], limit: int) -> int:
        """Minimum number of boats, each carrying at most two people whose
        combined weight does not exceed ``limit``.

        Greedy two-pointer over the (in-place) sorted weights: pair the
        heaviest remaining person with the lightest when they fit.
        """
        people.sort()  # sorts the caller's list in place, as before
        light, heavy = 0, len(people) - 1
        boats = 0
        while light < heavy:
            if people[light] + people[heavy] <= limit:
                light += 1  # lightest person shares this boat
            heavy -= 1      # heaviest person always departs
            boats += 1
        # One last boat if a single person remains in the middle.
        return boats + 1 if light == heavy else boats
# Ad-hoc manual check: the assignments below override each other, so only the
# last people/limit pair is actually exercised by the print call.
people = [2, 49, 10, 7, 11, 41, 47, 2, 22, 6, 13, 12, 33, 18, 10, 26, 2, 6, 50, 10]
limit = 50
# people = [2, 4, 5]
# limit = 5
people = [3, 2, 2, 1]
limit = 3
print(Solution().numRescueBoats(people, limit))
| [
"denny91002@gmail.com"
] | denny91002@gmail.com |
7290f6983e9502963083c6e49356c72e5ab472da | 282ec49f8ce8aa176c24e4f13a8852c9b0752e4a | /ap/leetcode/pyleet/lc0162.py | c00b706942d22ee7d2f26c4782aa0820e69bec3e | [] | no_license | montreal91/workshop | b118b9358094f91defdae1d11ff8a1553d67cee6 | 8c05e15417e99d7236744fe9f960f4d6b09e4e31 | refs/heads/master | 2023-05-22T00:26:09.170584 | 2023-01-28T12:41:08 | 2023-01-28T12:41:08 | 40,283,198 | 3 | 1 | null | 2023-05-01T20:19:11 | 2015-08-06T03:53:44 | C++ | UTF-8 | Python | false | false | 1,702 | py | #
# Author: montreal91
# Time: 0:30
# Failed attempts: 0
#
import unittest
MIN = -(2 ** 31)  # sentinel strictly smaller than any allowed element


class Solution:
    """LeetCode 162: binary search for any peak index in ``nums``."""

    def findPeakElement(self, nums):
        """Return an index i with nums[i] greater than both neighbours.

        The input is padded with MIN sentinels so that boundary elements
        only need to beat their single real neighbour.
        """
        padded = [MIN, *nums, MIN]
        lo, hi = 0, len(padded) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            rising_into_mid = padded[mid - 1] < padded[mid]
            falling_after_mid = padded[mid] > padded[mid + 1]
            if rising_into_mid and falling_after_mid:
                return mid - 1  # undo the left-sentinel offset
            if padded[mid - 1] <= padded[mid]:
                lo = mid   # slope still rising: a peak lies at or right of mid
            else:
                hi = mid   # slope falling: a peak lies left of mid
        return -1
class SolutionTestCase(unittest.TestCase):
    """Unit tests for Solution.findPeakElement."""

    def test_leet(self):
        # Cases taken from the LeetCode problem statement.
        solver = Solution()
        self.assertEqual(2, solver.findPeakElement([1, 2, 3, 1]))
        self.assertIn(solver.findPeakElement([1, 2, 1, 3, 5, 6, 4]), {1, 2, 5})

    def test_corner(self):
        # Singletons, pairs, and monotone runs in both directions.
        solver = Solution()
        self.assertEqual(0, solver.findPeakElement([-10]))
        self.assertEqual(0, solver.findPeakElement([-10, -11]))
        self.assertEqual(1, solver.findPeakElement([2, 3]))
        self.assertEqual(0, solver.findPeakElement([3, 2, 1]))
        self.assertEqual(1, solver.findPeakElement([2, 3, 2]))
        self.assertEqual(2, solver.findPeakElement([1, 2, 3]))
        self.assertEqual(0, solver.findPeakElement([0, -1, -2, -3]))
        self.assertEqual(1, solver.findPeakElement([-1, 0, -2, -3]))
        self.assertEqual(2, solver.findPeakElement([-2, -1, 0, -3]))
        self.assertEqual(3, solver.findPeakElement([-3, -2, -1, 0]))

    def test_random(self):
        # A fixed "random" sample: any of the listed peak indices is valid.
        solver = Solution()
        nums1 = [-6, -3, 2, -10, -8, -1, -3, -2, 9, -3, 9, 10, -3, 5, 9, -3, 10, -7, 6, -4]
        self.assertIn(solver.findPeakElement(nums1), {0, 2, 5, 8, 11, 14, 16})
if __name__ == "__main__":
    # Run the test suite when the file is executed directly.
    unittest.main()
| [
"nefedov.alexander91@yandex.ru"
] | nefedov.alexander91@yandex.ru |
0f1952cc16a097804947d9554b615919033d1555 | dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0 | /src/main/scala/contest/219/CountOfMatchesInTournament.py | cbf192a7fc2cc24d777e15c397db75f5ca03de19 | [] | no_license | joestalker1/leetcode | 8a5cdda17abd33c3eef859732f75d7bec77a9d0e | ae392ddbc7eb56cb814b9e9715043c98a89a6314 | refs/heads/master | 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | class Solution:
def numberOfMatches(self, n: int):
def count_matches(n):
if n <= 1:
return 0
if n % 2 == 0:
return n// 2 + count_matches(n // 2)
return (n-1)//2 + count_matches(1 + (n - 1) // 2)
return count_matches(n)
| [
"denys@dasera.com"
] | denys@dasera.com |
d715a816467cc368b2e27d960e8e54e0ebf19723 | a94757526253f3a3ae4b65b662316e3b03b271e9 | /week6/Burst_Balloons.py | cb95a008e66daa661e793bdc9de4d3105930073d | [] | no_license | RRoundTable/CPPS | 3e30f4d9c48aa9c85d74db6696c6d458fa38523a | fb679053ee89c15e2844fda1d705d46778ea1c0d | refs/heads/master | 2021-07-06T07:19:43.806752 | 2020-12-15T10:59:08 | 2020-12-15T10:59:08 | 214,435,381 | 1 | 2 | null | 2020-02-06T14:53:13 | 2019-10-11T12:50:22 | Python | UTF-8 | Python | false | false | 2,250 | py | """
link: https://leetcode.com/problems/burst-balloons/
Given n balloons, indexed from 0 to n-1. Each balloon is painted with a number on it represented by array nums. You are asked to burst all the balloons. If you burst balloon i you will get nums[left] * nums[i] * nums[right] coins, where left and right are the indices adjacent to i. After the burst, left and right then become adjacent.
Find the maximum coins you can collect by bursting the balloons wisely.
Note:
You may imagine nums[-1] = nums[n] = 1. They are not real therefore you can not burst them.
0 ≤ n ≤ 500, 0 ≤ nums[i] ≤ 100
Example:
Input: [3,1,5,8]
Output: 167
Explanation: nums = [3,1,5,8] --> [3,5,8] --> [3,8] --> [8] --> []
coins = 3*1*5 + 3*5*8 + 1*3*8 + 1*8*1 = 167
"""
class Solution(object):
    # Fix: the original line read `def maxCoins(self, nums):4` -- the stray
    # `4` made the indented body a syntax error, so the module did not import.
    def maxCoins(self, nums):
        """Brute-force burst order search: O(N!) time, O(N) recursion depth.

        Backtracking over every order of bursts; ``self.ans`` tracks the
        best total seen so far.

        :type nums: List[int]
        :rtype: int
        """
        self.ans = 0
        def backtracking(remain, idx, coins):
            # remain: balloons still standing; coins: total collected so far.
            if not remain:
                self.ans = max(self.ans, coins)
                return coins
            for i in range(len(remain)):
                # Missing neighbours at either edge count as 1.
                left = remain[i - 1] if i >= 1 else 1
                right = remain[i + 1] if i < len(remain) - 1 else 1
                backtracking(
                    remain[:i] + remain[i + 1:],
                    idx + 1,
                    coins + left * remain[i] * right,
                )
        backtracking(nums, 0, 0)
        return self.ans
class Solution(object):
    def maxCoins(self, nums):
        """Interval DP: O(n^3) time, O(n^2) space.

        ``best[lo][hi]`` holds the maximum coins obtainable by bursting
        every balloon strictly between positions ``lo`` and ``hi`` of the
        1-padded array, where the balloon burst *last* in that interval
        still sees ``padded[lo]`` and ``padded[hi]`` as neighbours.
        """
        padded = [1] + nums + [1]
        size = len(padded)
        best = [[0] * size for _ in range(size)]
        for span in range(2, size):
            for lo in range(size - span):
                hi = lo + span
                for last in range(lo + 1, hi):
                    gain = padded[lo] * padded[last] * padded[hi]
                    candidate = gain + best[lo][last] + best[last][hi]
                    if candidate > best[lo][hi]:
                        best[lo][hi] = candidate
        return best[0][size - 1]
| [
"ryu071511@gmail.com"
] | ryu071511@gmail.com |
675343e06faf2cf96b15116a8efead79da926023 | 795c2d7e2188f2ecb3e72bbb4053726856009c0d | /ctrl/UM_N1280/MASS/u-al508_ap9_surf_wind.py | f12ffe79ba6ffebffcefe041e759f41314f3b2a3 | [
"Apache-2.0"
] | permissive | markmuetz/cosmic | 3a4ef310cb9cb92b81ff57b74bb1511841f790a5 | f215c499bfc8f1d717dea6aa78a58632a4e89113 | refs/heads/master | 2023-08-01T10:55:52.596575 | 2021-09-20T19:26:33 | 2021-09-20T19:26:33 | 217,045,140 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import sys
sys.path.insert(0, '.')  # make the sibling `common` module importable
from common import AP9_SURF_WIND, BASE_OUTPUT_DIRPATH
# Restrict the shared ap9 surface-wind stream config to June-August 2006.
# NOTE(review): this mutates the dict imported from `common`, so any other
# module importing AP9_SURF_WIND sees these dates as well.
AP9_SURF_WIND['start_year_month'] = (2006, 6)
AP9_SURF_WIND['end_year_month'] = (2006, 8)
# Model run ids this script should process.
ACTIVE_RUNIDS = ['u-al508']
# Per-run MASS retrieval configuration: run id -> {'stream': {name: settings}}.
MASS_INFO = {}
MASS_INFO['u-al508'] = {
    'stream': {
        'ap9': AP9_SURF_WIND,
    },
}
| [
"markmuetz@gmail.com"
] | markmuetz@gmail.com |
6252183c5567fc46783ed4186bd8371dd020d801 | 68f757e7be32235c73e316888ee65a41c48ecd4e | /백준_python/5000/5724.py | 7b73a4f533a8d37c1c269269f298a8bb8378dcce | [] | no_license | leejongcheal/algorithm_python | b346fcdbe9b1fdee33f689477f983a63cf1557dc | f5d9bc468cab8de07b9853c97c3db983e6965d8f | refs/heads/master | 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import sys
def _sum_of_squares(n):
    """Return 1**2 + 2**2 + ... + n**2 via the closed form n(n+1)(2n+1)/6.

    O(1) per query instead of the original O(n) loop; identical results for
    every n >= 0 (BOJ 5724 inputs are positive, terminated by a final line).
    """
    return n * (n + 1) * (2 * n + 1) // 6


res = []
# Read all input at once; the last line is the terminator, so iterate only
# range(len(name) - 1) exactly as the original did.
name = sys.stdin.read().splitlines()
for idx in range(len(name) - 1):
    res.append(_sum_of_squares(int(name[idx])))
print("\n".join(map(str, res)))
| [
"aksndk123@naver.com"
] | aksndk123@naver.com |
af431faa9cacaf363149a53584e86a14881b8268 | 16c141516b00fc0d6849da3d8ecc64639aef95de | /convert_format_final.py | c388246910c3530ab4358f124111731b70708ee1 | [] | no_license | intfreedom/python | b7c70f2ab025744b736ddd420bc398927c02a9db | f3aa13df84ca8bf71de435146598e3ca1d5e6503 | refs/heads/master | 2022-10-15T12:45:45.768036 | 2020-06-15T01:46:03 | 2020-06-15T01:46:03 | 158,104,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | # -*- coding:utf-8 -*-
#!/user/bin/python3
# Sample input: a flat list of category dicts.  Top-level categories carry
# only a "name"; child categories also carry "parent_ind" naming their parent.
industry_list = [
    {
        "parent_ind": "女装",
        "name": "连衣裙"
    },
    {
        "name": "女装"
    },
    {
        "parent_ind": "女装",
        "name": "半身裙"
    },
    {
        "parent_ind": "女装",
        "name": "A字裙"
    },
    {
        "name": "数码"
    },
    {
        "parent_ind": "数码",
        "name": "电脑配件"
    },
    {
        "parent_ind": "电脑配件",
        "name": "内存"
    },
]
parent_ind_list = []
name_list = []
# Collect every referenced parent name; entries without "parent_ind" are
# top-level and contribute their own name as a potential parent.
for d in industry_list:
    if "parent_ind" in d:
        parent_ind_list.append(d["parent_ind"])
        name_list.append(d["name"])
    else:
        parent_ind_list.append(d["name"])
# Deduplicate parent_ind_list and name_list.
parent_ind_list = list(set(parent_ind_list))
name_list = list(set(name_list))
# Parents that never appear as a child name are the top-level categories.
first_index = [x for x in parent_ind_list if x not in name_list]
# Accumulator for the nested result (module-level; mutated by convert_format).
covert_dic = {}
# Conversion function.
def convert_format(data):
    """Convert the flat ``data`` list into the nested dict ``covert_dic``.

    Builds up to three levels: top-level category -> child -> grandchild.
    NOTE(review): reads the module-level ``first_index`` and writes into the
    module-level ``covert_dic``, so repeated calls accumulate earlier results.
    """
    try:
        # Pass 1: children whose parent is a top-level category.
        for d in data:
            if "parent_ind" in d and d["parent_ind"] in first_index and "name" in d:
                covert_dic[d["parent_ind"]] = {d["name"]:{}}
        # Pass 2: remaining children of the top-level categories.
        for d1 in data:
            if "parent_ind" in d1 and d1["parent_ind"] in list(covert_dic.keys()):
                second = covert_dic[d1["parent_ind"]]
                second[d1["name"]] = {}
        # Pass 3: attach third-level entries under their second-level parent.
        # NOTE(review): `n[parent] = {name: {}}` replaces the whole subtree,
        # so only the LAST grandchild per parent survives -- confirm intent.
        for d2 in data:
            for n in covert_dic.values():
                if "parent_ind" in d2 and d2["parent_ind"] in list(n.keys()):
                    n[d2["parent_ind"]] = {d2["name"]:{}}
    except KeyError as e:
        print(e)
    return covert_dic
if __name__ == '__main__':
    # Demo run over the sample data defined above.
    dic = convert_format(industry_list)
    print("result", dic)
# {
# "数码": {
# "电脑配件": {
# "内存" : {}
# }
# },
# "女装" : {
# "连衣裙": {},
# "半身裙": {},
# "A字裙": {}
# }
# }
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
b883a6e32adf1d96ba5d28de4f0c59f3953c1b9b | 1100bc9e3372615c36a7f012cd18d61cd98e8515 | /doc/source/conf.py | 85190f266d14c97d4078bdf0ea4c2c957f01f301 | [
"BSD-3-Clause"
] | permissive | mortezaomidi/scikit-criteria | fd3b1d5a394741607dc37d965a96ce3fb9d6acc3 | 2abad2e78e3833cca7ab660e34a978fcab6b92ef | refs/heads/master | 2020-03-29T23:57:08.030280 | 2018-06-22T04:03:44 | 2018-06-22T04:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,769 | py | # -*- coding: utf-8 -*-
#
# Scikit-Criteria documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 3 02:18:36 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the docs directory and the project root importable so the
# `skcriteria` import below resolves without installing the package.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# to retrieve scikit criteria metadata
# NOTE(review): presumably this flag makes the package skip heavy imports
# during metadata-only loading -- confirm against skcriteria/__init__.
os.environ["SKCRITERIA_IN_SETUP"] = "True"
import skcriteria
# modules to mock in readthedocs
MOCK_MODULES = []
#~ ["numpy", "scipy",
#~ "matplotlib", "matplotlib.pyplot",
#~ "matplotlib.cm", "matplotlib.patches",
#~ "matplotlib.spines", "matplotlib.projections.polar",
#~ "matplotlib.projections", "matplotlib.path"]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used to build these docs.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'numpydoc',
    'nbsphinx']
# Keep class members out of the autosummary toctree (numpydoc option).
numpydoc_class_members_toctree = False
# Never execute the bundled notebooks while building the docs.
nbsphinx_execute = 'never'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = skcriteria.NAME
copyright = u'2015-2016-2017-2018, Juan B. Cabral - Nadia A. Luczywo'
author = u'Juan BC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = skcriteria.VERSION
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
if on_rtd:
    from mock import Mock as MagicMock
    class Mock(MagicMock):
        # Any attribute access on a mocked module yields another Mock, so
        # autodoc can import modules whose real dependencies are absent on
        # Read the Docs.
        @classmethod
        def __getattr__(cls, name):
            return Mock()
    sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
else: # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_favicon = "_static/favicon.ico"
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scikit-Criteriadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Scikit-Criteria.tex', u'Scikit-Criteria Documentation',
u'Juan BC', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'scikit-criteria', u'Scikit-Criteria Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Scikit-Criteria', u'Scikit-Criteria Documentation',
author, 'Scikit-Criteria', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def setup(app):
    """Sphinx entry point: register the project's extra CSS and JS assets."""
    # NOTE(review): add_stylesheet/add_javascript were deprecated in Sphinx
    # 1.8 (renamed add_css_file/add_js_file) and removed in 4.0 -- fine for
    # the Sphinx version this conf targets, but verify before upgrading.
    app.add_stylesheet('css/skcriteria.css')
    app.add_javascript('js/skcriteria.js')
| [
"jbc.develop@gmail.com"
] | jbc.develop@gmail.com |
7bc6b1360f5ed8e427b08dc672e364c325c93b69 | f552ca018542184f34246405afb9b30999a57f2e | /comportamentais/interpreter/romanos/gramatica/doisDigitosRomanos.py | fed31c769a9ac5d83c0975fda5fcd40d5fbbd55f | [] | no_license | PlumpMath/DesignPatterns-440 | feea6847160e3c7393a2da80e6b22b9b2273ee92 | bef2ff66dddc90b7e6b529828b094bfc48754a01 | refs/heads/master | 2021-01-20T09:52:12.704627 | 2017-04-29T22:58:07 | 2017-04-29T22:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | from romanos.numeroRomanoInterpreter import NumeroRomanoInterpreter
class DoisDigitosRomanos(NumeroRomanoInterpreter):
    """Interpreter for the tens digit of a Roman numeral (X, XL, L, XC)."""
    def um(self):
        # one unit at this magnitude: 10 -> X
        return "X"
    def quatro(self):
        # four units: 40 -> XL
        return "XL"
    def cinco(self):
        # five units: 50 -> L
        return "L"
    def nove(self):
        # nine units: 90 -> XC
        return "XC"
    def multiplicador(self):
        # magnitude handled by this interpreter (tens)
        return 10
| [
"victorhad@gmail.com"
] | victorhad@gmail.com |
799f197eacb7e8a9cec6a5689e7a1177990a5153 | bf4a3781e1de2edcbec47285ca5c94bcd1789490 | /recipes/forms.py | 81b8647a440657a91ecc2255af017c72779a7826 | [] | no_license | roman-oxenuk/foodgram | 72a0df4f785fe7f10396ff7248bc80dc79bdb854 | c086f9ec5780f7e99e878b93b4e24e273d26d2a0 | refs/heads/master | 2022-12-21T11:21:32.742068 | 2020-09-18T14:30:16 | 2020-09-18T14:30:16 | 296,423,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | import re
from itertools import groupby
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from django.core.exceptions import ValidationError
from .models import Tag, Recipe, Ingredient, IncludedInRecipe
class TagsFilterForm(forms.Form):
    """Recipe-list filter: a set of tag checkboxes, matched by tag name."""
    # One checkbox per Tag; submitted values map to Tag.name rather than pk.
    tags = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=CheckboxSelectMultiple(),
        to_field_name='name'
    )
class RecipeCreateForm(forms.ModelForm):
    """Form for creating a Recipe."""
    class Meta:
        model = Recipe
        fields = ('title', 'tags')
    def clean_title(self):
        """Reject reserved titles such as 'create' (presumably because they
        collide with URL routes -- confirm against the urlconf)."""
        title = self.cleaned_data['title']
        # BUG FIX: `('create')` is just the string 'create', so the original
        # membership test matched any SUBSTRING of it ('eat', 'cre', even '').
        # A one-element tuple restores the intended exact-match semantics.
        if title.lower() in ('create',):
            raise ValidationError(f'Недопустимое имя "{title}"')
        return title
class IncludedInRecipeForm(forms.ModelForm):
    """One ingredient row of a recipe; every field is hidden and is filled
    by the client-side ingredient widget."""
    # Id of an existing IncludedInRecipe row (empty for new rows).
    in_recipe_id = forms.CharField(required=False, widget=forms.HiddenInput)
    ingredient = forms.ModelChoiceField(
        queryset=Ingredient.objects.all(),
        widget=forms.HiddenInput,
        required=False
    )
    name = forms.CharField(widget=forms.HiddenInput)
    amount = forms.CharField(widget=forms.HiddenInput)
    units = forms.CharField(widget=forms.HiddenInput)
    class Meta:
        model = IncludedInRecipe
        fields = ('amount', 'ingredient')
    def save(self, commit=True):
        # If no existing ingredient was selected, create one on the fly from
        # the submitted name, using the model's default units.
        if not hasattr(self.instance, 'ingredient'):
            new_ingredient = Ingredient(
                name=self.cleaned_data['name'],
                units_id=Ingredient.DEFAULT_UNITS_PK
            )
            new_ingredient.save()
            self.instance.ingredient = new_ingredient
        super().save(commit)
    def clean_amount(self):
        # NOTE(review): int() raises ValueError (a 500) on non-numeric input
        # instead of a ValidationError -- consider validating explicitly.
        amount = int(self.cleaned_data['amount'])
        if amount <= 0:
            raise ValidationError('Кол-во должно быть положительным числом')
        return amount
# TODO:
# * is_valid runs validation on every form.
#   If parent_instance is set, check whether each form's instance belongs to
#   that parent_instance; if not, ignore those forms.
#
# * save runs save() on each form and attaches parent_instance as the recipe.
#
# * If initial is given, pass that initial through to the forms.
#
#
class FormCollection:
    """Lightweight formset-like container.

    Parses ``data`` keys of the form ``<prefix>-<index>-<field>`` into one
    ``form_class`` instance per index.
    """

    def __init__(self, form_class=None, prefix=None,
                 data=None, files=None,
                 initial=None, parent_instance=None, parent_field=None,
                 min_values=None, max_values=None):
        assert form_class, '"form_class" parameter must be set'
        assert prefix, '"prefix" parameter must be set'
        self.prefix = prefix
        # BUG FIX: these parameters were accepted but never stored, so
        # save() crashed with AttributeError on self.parent_instance.
        self.parent_instance = parent_instance
        self.parent_field = parent_field  # TODO: currently unused
        self.initial = initial            # TODO: pass through to the forms
        self.files = files
        self.min_values = min_values
        self.max_values = max_values
        self.forms = []
        if data:
            # Raw f-string so \d is a proper regex escape, not a Python one.
            pattern = re.compile(rf'{self.prefix}-(\d+)-(.+)')
            data_list = []
            for param_name, value in data.items():
                if not param_name.startswith(self.prefix):
                    continue
                match = pattern.search(param_name)
                if match:
                    data_list.append((match.group(1), match.group(2), value))
            # Sort numerically: the original lexicographic sort ordered
            # index '10' before index '2'.
            data_list.sort(key=lambda item: int(item[0]))
            for _index, values in groupby(data_list, key=lambda item: item[0]):
                fields = {field_name: field_value
                          for _, field_name, field_value in values}
                self.forms.append(form_class(data=fields))
            # (Removed a leftover debug print of self.forms.)

    def __iter__(self):
        return iter(self.forms)

    def is_valid(self):
        """Validate every form; True only if all are valid.  Every form is
        visited so each one accumulates its full set of errors."""
        all_valid = True
        for form in self.forms:
            if not form.is_valid():
                all_valid = False
        return all_valid

    def save(self):
        """Attach the parent recipe to each form's instance and save it."""
        for form in self.forms:
            form.instance.recipe = self.parent_instance
            form.save()
| [
"roman.oxenuk@gmail.com"
] | roman.oxenuk@gmail.com |
6d9c44a70df192dc8b64762357655ec9e254437f | f43cf2eb27a960f3f855353b80ab1a476aef40ae | /mainSpace/files/search-comp-linear-binary.py | 263bfe94c04831aa155790adb0d6ce5ad9b4a7e3 | [
"MIT"
] | permissive | hanzhi713/ibcs-wd | 4fc8e56da85f49fe808ac0082c068dd368193b5a | 6f9ccf72cbf0319628073dd6f64fe7b4a623ebfb | refs/heads/master | 2020-03-23T09:51:04.151938 | 2018-08-02T08:06:03 | 2018-08-02T08:06:03 | 141,411,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,035 | py | import random
import timeit
import numpy as np
import matplotlib.pyplot as plt
def search_linear(xs, target):
    """Return the index of the first element of *xs* equal to *target*, or -1."""
    for index, item in enumerate(xs):
        if item == target:
            return index
    return -1
def search_binary_iter(xs, target):
    """Iterative binary search over the sorted sequence *xs*; -1 if absent."""
    lo, hi = 0, len(xs)          # half-open region of interest [lo, hi)
    while lo < hi:               # empty region means the target is absent
        mid = (lo + hi) // 2     # probe the middle of the region
        probed = xs[mid]
        if probed == target:
            return mid           # found it
        if probed < target:
            lo = mid + 1         # continue in the upper half
        else:
            hi = mid             # continue in the lower half
    return -1
# def search_binary_recur(xs, target):
# if len(xs) == 0:
# return -1
# else:
# mid_index = len(xs)//2
# if xs[mid_index]==target:
# return mid_index
# else:
# if target<xs[mid_index]:
# return search_binary_recur(xs[:mid_index],target)
# else:
# return search_binary_recur(xs[mid_index+1:],target)
def search_binary_recur(xs, target, lb, ub):
    """Recursive binary search on sorted *xs* over the INCLUSIVE range [lb, ub].

    Returns the index of *target*, or -1 when it is absent.

    Bug fix: the original terminated on ``lb == ub``, so the last remaining
    candidate was never examined (e.g. target 2 in [1, 2] returned -1), and
    once ``ub`` dropped below ``lb`` an absent target could recurse forever.
    Terminating on ``lb > ub`` (empty range) fixes both cases.
    """
    if lb > ub:  # empty range: target not present
        return -1
    mid_index = lb + (ub - lb) // 2
    if xs[mid_index] == target:
        return mid_index
    if xs[mid_index] < target:
        return search_binary_recur(xs, target, mid_index + 1, ub)
    return search_binary_recur(xs, target, lb, mid_index - 1)
def test():
    """timeit hook: linear-search a random existing element of global testSeq."""
    global testSeq
    # print(testSeq)
    target = random.choice(testSeq)
    search_linear(testSeq, target)
def test_binary_iter():
    """timeit hook: iterative binary search for a random element of testSeq."""
    global testSeq
    # print(testSeq)
    target = random.choice(testSeq)
    search_binary_iter(testSeq, target)
def test_binary_recur():
    """timeit hook: recursive binary search with inclusive bounds [0, len-1]."""
    global testSeq
    # print(testSeq)
    target = random.choice(testSeq)
    search_binary_recur(testSeq, target,0,len(testSeq)-1)
if __name__=='__main__':
    # NOTE(review): `global` at module level is a no-op; testSeq is simply a
    # module-level name assigned in the loop below and read by the test_* hooks.
    global testSeq
    testNumber = 1000  # timeit iterations per list size
    # Only one size (10000) with this range/step; widen it to get a real curve.
    NumList = list(range(10000, 11000, 1000))
    # t1 = timeit.Timer("test()","from __main__ import test")
    # print(t1.timeit(1000))
    timeList = []
    timeList_bin_recur = []
    timeList_bin_iter = []
    for TotalNum in NumList:
        # Sorted input: the binary searches require it (shuffle stays disabled).
        testSeq = list(range(TotalNum))
        # random.shuffle(testSeq)
        timeList.append(timeit.timeit("test()","from __main__ import test", number=testNumber))
        timeList_bin_recur.append(timeit.timeit("test_binary_recur()", "from __main__ import test_binary_recur", number=testNumber))
        timeList_bin_iter.append(timeit.timeit("test_binary_iter()", "from __main__ import test_binary_iter", number=testNumber))
    # Plot running time vs list length for the three search variants.
    plt.figure(1)
    # plt.title('')
    plot1 = plt.plot(NumList, timeList, 'ro',markersize = 15,label = 'linear')
    plt.plot(NumList, timeList, 'r')
    plot2 = plt.plot(NumList, timeList_bin_recur, 'g*',markersize = 15,label = 'binary_recur')
    plt.plot(NumList, timeList_bin_recur, 'g')
    plot2 = plt.plot(NumList, timeList_bin_iter, 'bx',markersize = 15,label = 'binary_iter')
    plt.plot(NumList, timeList_bin_iter, 'b')
    plt.xlabel('length of searching lists')
    plt.ylabel('running time (s)')
    plt.axis([0, round(max(NumList),-1)*1.1, 0, max(timeList)*1.1])
    plt.legend(loc='upper left', numpoints=1)
    plt.show()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
ec49bfd4deac33bb86287ee467872348197473ea | 5579f99b9feece7e64611436efa904a9f77a2771 | /tests/unit/test_regioninfo.py | c46614d4c7b0cdb176564d383f6f128c755f34ce | [
"MIT"
] | permissive | anton-prymak/rest-label-service-api-boto | ff08c09ec50b36628f2ac8374fc71be3986ff088 | c5ac3428dd5ae6b090b208f317639f60b778a184 | refs/heads/master | 2021-02-12T17:08:44.162577 | 2014-10-16T16:15:00 | 2014-10-16T16:15:00 | 244,610,370 | 2 | 0 | NOASSERTION | 2020-03-03T10:45:52 | 2020-03-03T10:45:52 | null | UTF-8 | Python | false | false | 5,162 | py | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
from tests.unit import unittest
import boto
from boto.regioninfo import RegionInfo, load_endpoint_json, merge_endpoints
from boto.regioninfo import load_regions, get_regions
class TestRegionInfo(object):
    """Minimal RegionInfo stand-in used to exercise the `region_cls` override
    in get_regions (see test_get_regions_overrides below)."""
    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        self.connection = connection
        self.name = name
        self.endpoint = endpoint
        self.connection_cls = connection_cls
class FakeConn(object):
    """Placeholder connection class passed as `connection_cls` in the tests."""
    pass
class TestEndpointLoading(unittest.TestCase):
    """Tests for boto.regioninfo endpoint loading, merging and region lookup."""
    def setUp(self):
        super(TestEndpointLoading, self).setUp()
    def test_load_endpoint_json(self):
        # The bundled endpoints file must contain the well-known ec2 entry.
        endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
        self.assertTrue('ec2' in endpoints)
        self.assertEqual(
            endpoints['ec2']['us-east-1'],
            'ec2.us-east-1.amazonaws.com'
        )
    def test_merge_endpoints(self):
        # merge_endpoints must overlay `additions` onto `defaults`: new
        # services, new regions, and overwritten endpoints.
        defaults = {
            'ec2': {
                'us-east-1': 'ec2.us-east-1.amazonaws.com',
                'us-west-1': 'ec2.us-west-1.amazonaws.com',
            }
        }
        additions = {
            # Top-level addition.
            's3': {
                'us-east-1': 's3.amazonaws.com'
            },
            'ec2': {
                # Overwrite. This doesn't exist, just test data.
                'us-east-1': 'ec2.auto-resolve.amazonaws.com',
                # Deep addition.
                'us-west-2': 'ec2.us-west-2.amazonaws.com',
            }
        }
        endpoints = merge_endpoints(defaults, additions)
        self.assertEqual(endpoints, {
            'ec2': {
                'us-east-1': 'ec2.auto-resolve.amazonaws.com',
                'us-west-1': 'ec2.us-west-1.amazonaws.com',
                'us-west-2': 'ec2.us-west-2.amazonaws.com',
            },
            's3': {
                'us-east-1': 's3.amazonaws.com'
            }
        })
    def test_load_regions(self):
        # Just the defaults.
        endpoints = load_regions()
        self.assertTrue('us-east-1' in endpoints['ec2'])
        self.assertFalse('test-1' in endpoints['ec2'])
        # With ENV overrides.
        os.environ['BOTO_ENDPOINTS'] = os.path.join(
            os.path.dirname(__file__),
            'test_endpoints.json'
        )
        self.addCleanup(os.environ.pop, 'BOTO_ENDPOINTS')
        endpoints = load_regions()
        self.assertTrue('us-east-1' in endpoints['ec2'])
        self.assertTrue('test-1' in endpoints['ec2'])
        self.assertEqual(endpoints['ec2']['test-1'], 'ec2.test-1.amazonaws.com')
    def test_get_regions(self):
        # With defaults.
        ec2_regions = get_regions('ec2')
        # NOTE(review): asserting an exact region count is brittle; it breaks
        # whenever a region is added to the bundled endpoints file.
        self.assertEqual(len(ec2_regions), 10)
        west_2 = None
        for region_info in ec2_regions:
            if region_info.name == 'us-west-2':
                west_2 = region_info
                break
        self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
        self.assertTrue(isinstance(west_2, RegionInfo))
        self.assertEqual(west_2.name, 'us-west-2')
        self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
        self.assertEqual(west_2.connection_cls, None)
    def test_get_regions_overrides(self):
        # Custom region_cls/connection_cls must be used instead of RegionInfo.
        ec2_regions = get_regions(
            'ec2',
            region_cls=TestRegionInfo,
            connection_cls=FakeConn
        )
        self.assertEqual(len(ec2_regions), 10)
        west_2 = None
        for region_info in ec2_regions:
            if region_info.name == 'us-west-2':
                west_2 = region_info
                break
        self.assertNotEqual(west_2, None, "Couldn't find the us-west-2 region!")
        self.assertFalse(isinstance(west_2, RegionInfo))
        self.assertTrue(isinstance(west_2, TestRegionInfo))
        self.assertEqual(west_2.name, 'us-west-2')
        self.assertEqual(west_2.endpoint, 'ec2.us-west-2.amazonaws.com')
        self.assertEqual(west_2.connection_cls, FakeConn)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"daniel@toastdriven.com"
] | daniel@toastdriven.com |
08f894625cc638dbc6603bcb5f825a745c2ee7ae | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/ops/split_benchmark.py | fa3706c9919443fd046fe4174921702f68fa3b20 | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,854 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for split and grad of split."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
def build_graph(device, input_shape, output_sizes, axis):
    """Build a graph containing a sequence of split operations.

    Args:
        device: string, the device to run on.
        input_shape: shape of the input tensor.
        output_sizes: size of each output along axis.
        axis: axis to be split along.

    Returns:
        An array of tensors to run()
    """
    with ops.device("/%s:0" % device):
        inp = array_ops.zeros(input_shape)
        outputs = []
        # Repeat the split 100x so op cost dominates per-run launch overhead.
        for _ in range(100):
            outputs.extend(array_ops.split(inp, output_sizes, axis))
        # Group every output so a single fetch executes all the splits.
        return control_flow_ops.group(*outputs)
class SplitBenchmark(test.Benchmark):
    """Benchmark split!"""

    def _run_graph(self, device, output_shape, variable, num_outputs, axis):
        """Run the graph and print its execution time.

        Args:
            device: string, the device to run on.
            output_shape: shape of each output tensors.
            variable: whether or not the output shape should be fixed
            num_outputs: the number of outputs to split the input into
            axis: axis to be split

        Returns:
            None; timing is reported through run_op_benchmark.
        """
        graph = ops.Graph()
        with graph.as_default():
            if not variable:
                # Fixed-size case: the input is num_outputs copies of
                # output_shape stacked along `axis`.
                if axis == 0:
                    input_shape = [output_shape[0] * num_outputs, output_shape[1]]
                    sizes = [output_shape[0] for _ in range(num_outputs)]
                else:
                    input_shape = [output_shape[0], output_shape[1] * num_outputs]
                    sizes = [output_shape[1] for _ in range(num_outputs)]
            else:
                # Variable-size case: jitter each split size by +/-2 around
                # the nominal size along `axis`.
                sizes = np.random.randint(
                    low=max(1, output_shape[axis] - 2),
                    high=output_shape[axis] + 2,
                    size=num_outputs,
                )
                total_size = np.sum(sizes)
                if axis == 0:
                    input_shape = [total_size, output_shape[1]]
                else:
                    input_shape = [output_shape[0], total_size]
            outputs = build_graph(device, input_shape, sizes, axis)
        # Disable graph optimizations (opt level L0) so the splits are not
        # folded away before being timed.
        config = config_pb2.ConfigProto(
            graph_options=config_pb2.GraphOptions(
                optimizer_options=config_pb2.OptimizerOptions(
                    opt_level=config_pb2.OptimizerOptions.L0
                )
            )
        )
        with session_lib.Session(graph=graph, config=config) as session:
            logging.set_verbosity("info")
            variables.global_variables_initializer().run()
            bench = benchmark.TensorFlowBenchmark()
            # mbs: bytes moved = elements * 4 bytes * 2 (read+write) * 100 reps.
            bench.run_op_benchmark(
                session,
                outputs,
                mbs=input_shape[0] * input_shape[1] * 4 * 2 * 100 / 1e6,
                extras={"input_shape": input_shape, "variable": variable, "axis": axis},
            )

    def benchmark_split(self):
        """Sweep shapes / variability and time each configuration on GPU."""
        print("Forward vs backward concat")
        shapes = [
            [2000, 8],
            [8, 2000],
            [100, 18],
            [1000, 18],
            [10000, 18],
            [100, 97],
            [1000, 97],
            [10000, 1],
            [1, 10000],
        ]
        axis_ = [1]  # 0 is very fast because it doesn't actually do any copying
        num_outputs = 100
        variable = [False, True]  # fixed input size or not
        for shape in shapes:
            for axis in axis_:
                for v in variable:
                    self._run_graph("gpu", shape, v, num_outputs, axis)


if __name__ == "__main__":
    test.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
ad9406e1aa3c32013e81627aea2e446fc242e346 | 054595a968d1a4b9439f2fd8d94bfc640834b6ef | /backend/joinus_26090/urls.py | b99bcb433ed7884c12376e4309f704f8dba325ee | [] | no_license | crowdbotics-apps/joinus-26090 | ed8c63f5326b642bd921b1262f8caedc010bc7d3 | 361395e24ed6026fde861b9201254302a8921c67 | refs/heads/master | 2023-06-03T17:30:51.994550 | 2021-05-03T16:30:16 | 2021-05-03T16:30:16 | 363,992,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | """joinus_26090 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Route table: app URLs, auth, admin, then the catch-all SPA shell (last).
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "joinus"
admin.site.site_title = "joinus Admin Portal"
admin.site.index_title = "joinus Admin"

# swagger
# drf-yasg schema view; API docs require an authenticated session.
api_info = openapi.Info(
    title="joinus API",
    default_version="v1",
    description="API documentation for joinus App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# Serve the SPA index for the root path and for any otherwise-unmatched URL.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e6ff88084ade38c18e9a171a3e7c84dfa224c803 | 2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b | /75.颜色分类.py | d7275bda1f2da6dabef9c4380e3f17d7bb0ac379 | [] | no_license | mqinbin/python_leetcode | 77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3 | 73e0c81867f38fdf4051d8f58d0d3dc245be081e | refs/heads/main | 2023-03-10T18:27:36.421262 | 2021-02-25T07:24:10 | 2021-02-25T07:24:10 | 314,410,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | #
# @lc app=leetcode.cn id=75 lang=python3
#
# [75] 颜色分类
#
# @lc code=start
class Solution:
    def sortColors(self, nums: list[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.

        One-pass Dutch-national-flag partition: p0 is where the next 0
        belongs, p1 where the next 1 belongs; 2s are left behind i.

        Fix: the original annotated nums as ``List[int]`` without importing
        ``typing.List``, which raises NameError the moment this module is
        executed outside the LeetCode harness.  The builtin generic
        ``list[int]`` (PEP 585, Python 3.9+) needs no import.
        """
        n = len(nums)
        p0 = p1 = 0
        for i in range(n):
            if nums[i] == 1:
                # Place the 1 at the 1-boundary.
                nums[i], nums[p1] = nums[p1], nums[i]
                p1 += 1
            elif nums[i] == 0:
                # Place the 0 at the 0-boundary; if a 1 was displaced,
                # move it back to the 1-boundary.
                nums[i], nums[p0] = nums[p0], nums[i]
                if p0 < p1:
                    nums[i], nums[p1] = nums[p1], nums[i]
                p0 += 1
                p1 += 1
# @lc code=end
| [
"mqinbin@gmail.com"
] | mqinbin@gmail.com |
2b7027cad3eff16ee075235c3e738f08948e6a66 | da687718aa8ce62974090af63d25e057262e9dfe | /ATIVIDADES/polling_routine.py | c6810bcc93587456920cfd7467db7a0fce6048a9 | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!/usr/bin/env python3
"""Polling for Piza to cure the hunger"""
import time
hungry = True
while hungry:
print('\n'"Opening the front door")
front_door = open("front_door.txt", 'r')
if "Pizza Guy" in front_door:
print("Pizza's here!")
hungry = False
else:
print("Not yet...")
print("Closing the front door")
front_door.close()
time.sleep(2)
print()
print('Done!!') | [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
2019e102b837cabfd175de63d3052332582fb68e | f591c799d213ad2ff2f9b4583ea2451bc2530aa8 | /pele_platform/Utilities/Helpers/plop_launcher.py | 3d5d3a524765fd5efc65d76889639d73659a6921 | [
"MIT",
"Apache-2.0"
] | permissive | carlesperez94/pele_platform | f390e8ebd0f86550feb8acf05aaa79d5b6e2b7a2 | ad6e8919373b39590ae654ee5ae5af0ae0f71c6b | refs/heads/master | 2022-08-24T19:44:40.082142 | 2020-06-17T11:01:39 | 2020-06-17T11:01:39 | 224,458,495 | 0 | 0 | Apache-2.0 | 2020-06-17T11:03:12 | 2019-11-27T15:16:30 | Python | UTF-8 | Python | false | false | 2,080 | py | import os
import PlopRotTemp as plop
import pele_platform.constants.constants as cs
import pele_platform.Utilities.Helpers.helpers as hp
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
except SyntaxError:
import subprocess
def parametrize_miss_residues(args, env, syst, resname=None):
    """Parameterize a hetero residue by running PlopRotTemp's main.py
    under Schrodinger's bundled Python, writing the OPLS2005 template and
    rotamer library into the PELE run directory.

    :param args: parsed CLI arguments (residue, core, mtor, n, ...).
    :param env: run environment; only env.pele_dir is read here.
    :param syst: system object providing the ligand file path (syst.lig).
    :param resname: residue name override; defaults to args.residue.
    """
    resname = args.residue if not resname else resname
    # Prefer the utilities/python launcher; newer suites ship "run" instead.
    SPYTHON = os.path.join(cs.SCHRODINGER, "utilities/python")
    if not os.path.exists(SPYTHON):
        SPYTHON = os.path.join(cs.SCHRODINGER, "run")
    file_path = os.path.join(os.path.dirname(plop.__file__), "main.py")
    options = retrieve_options(args, env)
    templatedir = os.path.join(env.pele_dir, "DataLocal/Templates/OPLS2005/HeteroAtoms")
    rotamerdir = os.path.join(env.pele_dir, "DataLocal/LigandRotamerLibs")
    # NOTE(review): unused variable (typo of "mae_charges"?); the flag is
    # actually emitted by retrieve_options.
    mae_cahrges = True if args.mae_lig else False
    # PlopRotTemp needs Schrodinger's site-packages and root on its env.
    my_env = os.environ.copy()
    my_env["SCHRODINGER_PYTHONPATH"] = os.path.join(cs.SCHRODINGER, "internal/lib/python2.7/site-packages/")
    my_env["SCHRODINGER"] = cs.SCHRODINGER
    print("Running Plop")
    print("{} {} {} {} --outputname {} --templatedir {} --rotamerdir {}".format(SPYTHON, file_path, options, syst.lig, resname, templatedir, rotamerdir))
    subprocess.call("{} {} {} {} --outputname {} --templatedir {} --rotamerdir {}".format(SPYTHON, file_path, options, syst.lig, resname, templatedir, rotamerdir).split(), env=my_env)
    #hp.silentremove([syst.lig])
def retrieve_options(args, env):
    """
    Translate the input arguments into a PlopRotTemp CLI option string.

    Only values that differ from PlopRotTemp's defaults are emitted; the
    result is a single space-separated string (empty when everything is
    at its default).  ``env`` is accepted for interface compatibility but
    is not consulted here.
    """
    flags = []
    if args.core != -1:
        flags.append("--core {}".format(args.core))
    if args.mtor != 4:
        flags.append("--mtor {}".format(args.mtor))
    if args.n != 1000:
        flags.append("--n {}".format(args.n))
    if args.forcefield != "OPLS2005":
        flags.append("--force {}".format(args.forcefield))
    if args.mae_lig:
        flags.append("--mae_charges")
    if args.gridres != 10:
        flags.append("--gridres {}".format(args.gridres))
    return " ".join(flags)
| [
"daniel.soler@e-campus.uab.cat"
] | daniel.soler@e-campus.uab.cat |
6742052170a8609b5e4e64a4478f6516e2214114 | 7ac8ea8ccc658d1461dcd2667a2ce8ef7909eea0 | /leetcode/2000_Reverse_prefix_of_word.py | d52224c2e9dbe6d755e7737044782c6c8e8a29e4 | [] | no_license | aasthaagrawal/Algorithms_and_Data_Structures | a7bbfa4ce70ba873c8957c3524b612a97194682d | cd23bddc7766d13a9896356b177caeb533f2f433 | refs/heads/master | 2022-04-27T05:30:19.080934 | 2022-03-30T23:03:37 | 2022-03-30T23:03:37 | 173,667,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #https://leetcode.com/problems/reverse-prefix-of-word/
#Complexity: O(n)
class Solution:
    def reversePrefix(self, word: str, ch: str) -> str:
        """Reverse the prefix of ``word`` up to and including the first
        occurrence of ``ch``; return ``word`` unchanged if ``ch`` is absent.

        Runs in O(n) using str.find plus slicing.
        """
        cut = word.find(ch)
        if cut == -1:
            return word
        cut += 1  # include the matched character in the reversed prefix
        return word[:cut][::-1] + word[cut:]
| [
"aasthaagrawal94@gmail.com"
] | aasthaagrawal94@gmail.com |
2bfcc5bb798107e14efff6e42cef5807eaf04af8 | 3dcf3b4d1822fefc0dcab8195af1239abe7971a1 | /AMAO/apps/Professor/views/consultar_notas.py | 6f2578e1ae3360e04f2a6bbdbb003d0af409b7d6 | [
"MIT"
] | permissive | arruda/amao | a1b0abde81be98a04dee22af9ff0723ed7697fb8 | 83648aa2c408b1450d721b3072dc9db4b53edbb8 | refs/heads/master | 2021-01-13T02:11:52.776011 | 2014-09-20T15:43:16 | 2014-09-20T15:43:16 | 23,271,083 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | # -*- coding: utf-8 -*-
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import user_passes_test
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Professor.models import Professor, Monitor
from Professor.views.utils import prof_monit_exist
from Avaliacao.models import TemplateAvaliacao, Avaliacao
from Avaliacao.Questao.models import QuestaoDeAvaliacao
from Avaliacao.Questao.forms import AlterarNotaQuestaoForm
@prof_monit_exist
@login_required
@render_to('professor/consultar.html')
def consultar(request):
    """Grade-consultation view for professors and monitors.

    Resolves the logged-in user to a Professor or Monitor, then narrows the
    page to a template / evaluation / question selected via GET parameters.
    Returns ``locals()`` because @render_to uses the local namespace as the
    template context.
    """
    prof = None
    monitor = None
    # The user must be either a professor or a monitor; otherwise bounce home.
    try:
        prof = request.user.professor_set.get()
    except Professor.DoesNotExist:
        try:
            monitor = request.user.monitor_set.get()
        except Monitor.DoesNotExist:
            return redirect('/')
    template_id = request.GET.get('template', None)
    avaliacao_id = request.GET.get('avaliacao', None)
    questao_id = request.GET.get('questao', None)
    template = None
    # Resolve the TemplateAvaliacao from whichever id was supplied
    # (template directly, via an evaluation, or via a question).
    if template_id != None:
        template = get_object_or_404(TemplateAvaliacao, id=template_id)
    elif avaliacao_id != None:
        avaliacao = get_object_or_404(Avaliacao, id=avaliacao_id)
        template = avaliacao.templateAvaliacao
    elif questao_id != None:
        questao = get_object_or_404(QuestaoDeAvaliacao, id=questao_id)
        # On POST the grade of the selected question is updated in place.
        if request.POST:
            form_questao = AlterarNotaQuestaoForm(request.POST, instance=questao)
            form_questao.save()
        else:
            form_questao = AlterarNotaQuestaoForm(instance=questao)
        template = questao.avaliacao.templateAvaliacao
    if prof != None:
        # No selection yet: list the professor's closed (ativa=False) templates.
        if template == None:
            templates = TemplateAvaliacao.objects.filter(turma__in=prof.turmas.all(), ativa=False)
            return locals()
        # Access control: the template must belong to this professor.
        if not template.verifica_professor(prof):
            return redirect('/')
    else:
        # Monitor path: same flow scoped to the monitor's subject's classes.
        if template == None:
            templates = TemplateAvaliacao.objects.filter(turma__in=monitor.materia.turmas.all(), ativa=False)
            return locals()
        if not template.verifica_monitor(monitor):
            return redirect('/')
    # Only expose `template` to the context when it was selected directly.
    template = None if template_id == None else template
    return locals()
| [
"felipe.arruda.pontes@gmail.com"
] | felipe.arruda.pontes@gmail.com |
31d871da06f55a3ef66b7c60acac85467ed5d7c5 | 5b6b2018ab45cc4710cc5146040bb917fbce985f | /411_gray-code/gray-code.py | 44fedab5de18cbab81892a85d91a340a62d6487d | [] | no_license | ultimate010/codes_and_notes | 6d7c7d42dcfd84354e6fcb5a2c65c6029353a328 | 30aaa34cb1c840f7cf4e0f1345240ac88b8cb45c | refs/heads/master | 2021-01-11T06:56:11.401869 | 2016-10-30T13:46:39 | 2016-10-30T13:46:39 | 72,351,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | # coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/gray-code
@Language: Python
@Datetime: 16-06-29 15:23
'''
class Solution:
    # @param {int} n a number
    # @return {int[]} Gray code
    def grayCode(self, n):
        """Return the n-bit Gray code sequence as a list of ints.

        Built iteratively by reflect-and-prefix: each round mirrors the
        current sequence and sets the next-higher bit on the mirrored half.
        """
        if n == 0:
            return [0]
        codes = [0, 1]
        for bit in range(1, n):
            codes = codes + [(1 << bit) | value for value in reversed(codes)]
        return codes

    def grayCode1(self, n):
        """Legacy string-based variant kept for reference.

        Builds the (n-1)-bit code, renders each value as a zero-padded
        binary string, prefixes '0' for the first half and '1' for the
        reflected half, then parses the strings back to ints.  Like the
        original implementation it returns a map object (an iterator)
        for n >= 2 and plain lists for n <= 1.
        """
        if n == 0:
            return [0]
        if n == 1:
            return [int('0', 2), int('1', 2)]
        width = n - 1
        lower = self.grayCode(width)
        render = lambda value: format(value, '0%db' % width)
        strings = ['0' + render(value) for value in lower]
        strings += ['1' + render(value) for value in reversed(lower)]
        return map(lambda s: int(s, 2), strings)
"ultimate010@gmail.com"
] | ultimate010@gmail.com |
a779e96917e85b0b9d3635ca3032b0c3bbb294c9 | 7e0390cb815da04c4d4192a690057f50c811c46c | /users/adapter.py | 8f9e2c450958ee2b4b825ed1ea319befd14e00e2 | [] | no_license | RikSchoonbeek/DRF-rest-auth-custom-user-model-with-registration | db0b39b254a25243dae2cc36f70f50e71a43524b | 88fabab6abb1763cfdec3897a128a6fe08f34fca | refs/heads/master | 2020-04-15T00:16:31.940437 | 2019-01-05T16:51:56 | 2019-01-05T16:51:56 | 164,233,670 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from allauth.account.adapter import DefaultAccountAdapter
class CustomAccountAdapter(DefaultAccountAdapter):
def save_user(self, request, user, form, commit=False):
print("CustomAccountAdapter.save_user()")
user = super().save_user(request, user, form, commit)
data = form.cleaned_data
print("form.cleaned_data:")
print(form.cleaned_data)
user.preferred_locale = data.get('preferred_locale')
user.save()
return user | [
"rik.schoonbeek@gmail.com"
] | rik.schoonbeek@gmail.com |
a504cffeeace695bd13e4d89a7d97821b399e6a4 | e3d739317399940aac5e2fcd468da3fa94c87a1c | /Projects/OOP/opp-0112.py | 1ac0046719522f7e0324e0a69cf2c9d93e475a12 | [] | no_license | mohsen-mehrabani/Python_Udemy_Course | 9a03222b3ec5963108a48a044b15e2869b6d3338 | fdaef0fd970a956bccb0ee28de42b8ecf44d59b6 | refs/heads/master | 2021-01-30T13:11:10.762363 | 2020-05-01T14:02:16 | 2020-05-01T14:02:16 | 243,500,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | class Kettle(object):
power_source = "Electricity"
def __init__(self, make, price):
self.make = make
self.price = price
self.on = False
def switch_on(self):
self.on = True
Kenwood = Kettle("Kenwood", 8.99)
print(Kenwood.make)
print(Kenwood.price)
Kenwood.price = 12.99
print(Kenwood.price)
Hamilton = Kettle("Hamilton", 14.55)
print("Models: {} = {}, {} = {}".format(Kenwood.make, Kenwood.price, Hamilton.make, Hamilton.price))
print("Models: {0.make} = {0.price}, {1.make} = {1.price}".format(Kenwood, Hamilton))
"""
Class: Template for creating objects. All objects created using the same class will have the same characteristics.
Object: An instance of a class.
Instantiate: Create an instance of a class.
Method: A function defined in a class.
Attribute: A variable bound to an instance of a class.
"""
print(Hamilton.on)
Hamilton.switch_on()
print(Hamilton.on)
Kettle.switch_on(Kenwood)
print(Kenwood.on)
Kenwood.switch_on()
print("*" * 60)
Kenwood.power = 1.5
print(Kenwood.power)
# print(Hamilton.power)
print(Kettle.power_source)
print(Kenwood.power_source)
print(Hamilton.power_source)
print(Kettle.__dict__)
print(Kenwood.__dict__)
print(Hamilton.__dict__)
print("Switch to Atomic")
Kettle.power_source = "Atomic"
print(Kettle.power_source)
print(Kenwood.power_source)
print(Hamilton.power_source)
print("Switch Kenwood to Gas")
Kenwood.power_source = "Gas"
print(Kettle.power_source)
print(Kenwood.power_source)
print(Hamilton.power_source) | [
"mohsen.mehrabani@hotmail.com"
] | mohsen.mehrabani@hotmail.com |
62f2aff601aac2c26d80d46614167a6019a78de5 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /kiX7WjSFeTmBYcEgK_0.py | c6603c9d77b51bff8fac27af9fa5c7430675534c | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py |
def major_sum(lst):
pos, neg, zero = 0, 0, 0
for n in lst:
if n > 0:
pos += n
elif n < 0:
neg += n
else:
zero += 1
return max(pos, neg, zero, key=abs)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
456ddc34223f182eacc439317cbdf0440dd3d201 | 1eaf69357dfca66e4dc6651da2b93db8665164f9 | /2018/14/code.py | e054e45736b4ec40c696ecafb4c0f37b8fe11538 | [
"MIT"
] | permissive | Akumatic/Advent-of-Code | deb89b9a5302999ffb344766bb3f1b0dd5272445 | 5377d8d653092246a7a35c7fa2a3e22cc74ebb0b | refs/heads/master | 2022-12-21T20:32:05.978675 | 2022-12-16T14:41:23 | 2022-12-16T14:41:23 | 221,700,755 | 24 | 13 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | """ https://adventofcode.com/2018/day/14 """
def readFile():
    """Read and return the raw contents of input.txt next to this script.

    Fix: the original used ``__file__.rstrip('code.py')``, but str.rstrip
    strips a *character set* (any trailing 'c','o','d','e','.','p','y'),
    not a literal suffix, so it could eat extra trailing characters from
    the directory path.  Slice the exact suffix off instead.
    """
    base = __file__[:-len("code.py")] if __file__.endswith("code.py") else __file__
    with open(f"{base}input.txt", "r") as f:
        return f.read()
def part1(vals):
    """Return the ten recipe scores that follow the first int(vals) recipes.

    Simulates the two elves' recipe board: each round appends the digit(s)
    of the sum of the elves' current recipes, then each elf steps forward
    1 + own-score positions (wrapping).
    """
    target = int(vals)
    board = [3, 7]
    elf_a, elf_b = 0, 1
    while len(board) < target + 10:
        combined = board[elf_a] + board[elf_b]
        if combined >= 10:
            board.append(combined // 10)
        board.append(combined % 10)
        size = len(board)
        elf_a = (elf_a + 1 + board[elf_a]) % size
        elf_b = (elf_b + 1 + board[elf_b]) % size
    return "".join(str(score) for score in board[target:target + 10])
def part2(vals):
    """Return how many recipes precede the first appearance of `vals`.

    Fix: the original only compared against the board once per round, and
    in the two-digit branch it only checked the alignment ending one digit
    before the end — a pattern completed by the *second* appended digit was
    never detected (and never re-checked later).  Checking after every
    single append catches every alignment.
    """
    pattern = [int(c) for c in vals.strip()]
    k = len(pattern)
    scores = [3, 7]
    elf1, elf2 = 0, 1
    while True:
        total = scores[elf1] + scores[elf2]
        digits = divmod(total, 10) if total > 9 else (total,)
        for digit in digits:
            scores.append(digit)
            # Compare after each individual append so no alignment is missed.
            if scores[-k:] == pattern:
                return len(scores) - k
        n = len(scores)
        elf1 = (elf1 + 1 + scores[elf1]) % n
        elf2 = (elf2 + 1 + scores[elf2]) % n


if __name__ == "__main__":
    vals = readFile()
    print(f"Part 1: {part1(vals)}")
    print(f"Part 2: {part2(vals)}")
"ugp@hotmail.de"
] | ugp@hotmail.de |
be747dceec689a48784786829aeaba72d050c65f | 42d3d37a3dd22402154da4f4bd020afd7b7bad58 | /examples/adspygoogle/adwords/v201206/basic_operations/update_ad_group.py | 1f87faec6fe4870cec0c8689c6d0df893893ec41 | [
"Apache-2.0"
] | permissive | nearlyfreeapps/python-googleadwords | 1388316ec4f8d9d6074688ec4742872b34b67636 | b30d90f74248cfd5ca52967e9ee77fc4cd1b9abc | refs/heads/master | 2020-06-03T23:05:08.865535 | 2012-08-02T21:46:16 | 2012-08-02T21:46:16 | 5,278,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates status for a given ad group. To get ad groups, run
get_ad_groups.py.
Tags: AdGroupService.mutate
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
# Placeholder: replace with a real ad group id before running.
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'


def main(client, ad_group_id):
    """Pause the given ad group via the v201206 AdGroupService (sandbox).

    NOTE: this is Python 2 code (bare ``print`` statement below).

    :param client: an initialized AdWordsClient.
    :param ad_group_id: id of the ad group to set to PAUSED.
    """
    # Initialize appropriate service.
    ad_group_service = client.GetAdGroupService(
        'https://adwords-sandbox.google.com', 'v201206')
    # Construct operations and update an ad group.
    operations = [{
        'operator': 'SET',
        'operand': {
            'id': ad_group_id,
            'status': 'PAUSED'
        }
    }]
    ad_groups = ad_group_service.Mutate(operations)[0]
    # Display results.
    for ad_group in ad_groups['value']:
        print ('Ad group with name \'%s\' and id \'%s\' was updated.'
               % (ad_group['name'], ad_group['id']))
    print
    print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                               client.GetOperations()))


if __name__ == '__main__':
    # Initialize client object.
    client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
    main(client, ad_group_id)
| [
"ahalligan@nearlyfreehosting.com"
] | ahalligan@nearlyfreehosting.com |
ea66f322c1abbdc9d22386d490d7d3db87217f03 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03496/s011585897.py | 43a8c91c3215bfc464cb3350afd93cbfa4bfdce0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | from collections import Counter, defaultdict
import sys
sys.setrecursionlimit(10 ** 5 + 10)
# input = sys.stdin.readline
from math import factorial
import heapq, bisect
import math
import itertools
import queue
from collections import deque
def main():
    """Read N and a sequence, then print at most 2N-2 operations (i, j).

    Each printed pair presumably encodes "add a[i] onto a[j]" (1-indexed)
    — verify against the original problem statement.  Strategy visible in
    the code: broadcast the element of dominant magnitude (max if
    max+min >= 0, else min) onto every other element, then chain adjacent
    pairs forward (for max) or backward (for min).
    """
    num = int(input())
    data = list(map(int, input().split()))
    min_num = min(data)
    max_num = max(data)
    ans_list = []
    if max_num + min_num >= 0:
        # Maximum dominates: add it to every other element, then
        # accumulate left-to-right.
        idx = data.index(max_num)
        for i in range(num):
            if data[i] == max_num:
                continue
            ans_list.append([idx + 1, i + 1])
        for i in range(num - 1):
            ans_list.append([i + 1, i + 2])
    else:
        # Minimum dominates: symmetric case, accumulate right-to-left.
        idx = data.index(min_num)
        for i in range(num):
            if data[i] == min_num:
                continue
            ans_list.append([idx + 1, i + 1])
        for i in range(num - 1)[::-1]:
            ans_list.append([i + 2, i + 1])
    print(len(ans_list))
    for i, j in ans_list:
        print(i, j)


if __name__ == '__main__':
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0cb9d9aac1bb5d8e211e4f650e8ca35b082e152b | 0d04eb03ed5163362e563192dced2691e0146f59 | /sorbet/feedmanager/urls.py | af1bd9292537cb061c9b2a257fc608750861e220 | [
"BSD-3-Clause",
"CC-BY-3.0",
"BSD-2-Clause"
] | permissive | jacobjbollinger/sorbet | 428fc277099daed11e1c81a5037af5e1192ee893 | a222098e3926ca56c7704499bc0875da2bc0ba71 | refs/heads/master | 2020-04-01T20:19:51.454219 | 2012-05-07T17:29:40 | 2012-05-10T11:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from django.conf.urls import patterns, url
# Legacy (pre-Django-1.8) patterns() URLconf for the feed manager views.
urlpatterns = patterns('sorbet.feedmanager.views',
    # Listing pages.
    url(r'^featured/$', 'featured', name='featured'),
    url(r'^$', 'feeds', name='feeds'),
    # Feed management actions; feed_id may be empty (\d*).
    url(r'^add-feed/$', 'add_feed', name='add-feed'),
    url(r'^remove-feed/(?P<feed_id>\d*)$', 'remove_feed', name='remove-feed'),
)
| [
"isaac@bythewood.me"
] | isaac@bythewood.me |
019196bafc67176ddcb0a2fba69a151ce70b83c8 | ce8e34c1c2ac180f95c577c03da71a0671ccf13e | /app/main/view.py | d6740c61aa44651f3ed48390fe0878b368338927 | [
"MIT"
] | permissive | leskeylevy/newsapp | 0cc481ab9f08297dddb2d3272bb59428d9a198d5 | 3d28deaae3ad85c89aa93a3a21ad4ccebc28ff80 | refs/heads/master | 2020-03-27T17:33:08.719265 | 2018-09-05T03:24:24 | 2018-09-05T03:24:24 | 146,858,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | from flask import render_template
from . import main
from ..request import get_source,get_article
# Views
@main.route('/')
def index():
    '''
    Root page view: renders index.html with the list of news sources.
    '''
    # Getting source news
    news = get_source()
    title = 'Welcome to The best news site there is out here'
    return render_template('index.html', title=title, news=news)


@main.route('/source/<name>')
def articles(name):
    '''
    Source details view: renders source.html with the articles fetched
    for the given source.

    :param name: source identifier taken from the URL.
    :return: rendered source.html response.
    '''
    articles_display = get_article(name)
    return render_template('source.html', articles=articles_display)
| [
"leskeylevy@gmail.com"
] | leskeylevy@gmail.com |
07d5a58350892254bfe9ffc9aaacee1a02c6a1df | 4027d8dafb6f60568f03357e329c09262161e963 | /machinelearn/test/TestDict.py | 692c4c0c60d823986a3b533f54755fd8844b78c3 | [] | no_license | pentiumCM/machinelearn | a2bfa15d6e9f20fd604116f77186da76ebcc4f27 | 329bb9521b5e06e3471aa209fc87ca47f8d5fdcb | refs/heads/master | 2022-12-08T23:43:05.784930 | 2021-05-24T04:02:23 | 2021-05-24T04:02:23 | 216,704,188 | 7 | 1 | null | 2022-12-08T09:30:07 | 2019-10-22T02:13:45 | Python | UTF-8 | Python | false | false | 1,320 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@Author : pentiumCM
@Email : 842679178@qq.com
@Software: PyCharm
@File : TestDict.py
@Time : 2019/12/17 14:35
@desc : 单元测试
'''
import unittest
from test.mydict import Dict
class TestDict(unittest.TestCase):
    """Unit tests for the attribute-accessible Dict from test.mydict.

    Dict is expected to behave like a normal dict while also exposing
    keys as attributes (d.key <-> d['key']).
    """

    def test_init(self):
        # Keyword construction populates both attribute and dict access.
        print("hello World1")
        d = Dict(a=1, b='test')
        self.assertEqual(d.a, 1)
        self.assertEqual(d.b, 'test')
        self.assertTrue(isinstance(d, dict))

    def test_key(self):
        # Item assignment must be readable as an attribute.
        print("hello World2")
        d = Dict()
        d['key'] = 'value'
        self.assertEqual(d.key, 'value')

    def test_attr(self):
        # Attribute assignment must be readable as an item.
        print("hello World3")
        d = Dict()
        d.key = 'value'
        self.assertTrue('key' in d)
        self.assertEqual(d['key'], 'value')

    def test_keyerror(self):
        # Missing item access raises KeyError, like a plain dict.
        print("hello World4")
        d = Dict()
        with self.assertRaises(KeyError):
            value = d['empty']

    def test_attrerror(self):
        # Missing attribute access raises AttributeError.
        print("hello World5")
        d = Dict()
        with self.assertRaises(AttributeError):
            value = d.empty

    def test_hello(self):
        print("hello World")

    def setUp(self):
        print('setUp...')

    def tearDown(self):
        print('tearDown...')


if __name__ == '__main__':
    unittest.main()
| [
"842679178@qq.com"
] | 842679178@qq.com |
a91874733b901b7fbdb72d484c7ca2f0d24395ec | e2d22f12f8e540a80d31de9debe775d35c3c5c22 | /blousebrothers/users/adapters.py | 18e497b933cb2472316ed79f4bdf6349419f237b | [
"MIT"
] | permissive | sladinji/blousebrothers | 360c3b78ec43379977dbf470e5721e6a695b2354 | 461de3ba011c0aaed3f0014136c4497b6890d086 | refs/heads/master | 2022-12-20T10:24:07.631454 | 2019-06-13T13:17:35 | 2019-06-13T13:17:35 | 66,867,705 | 1 | 0 | NOASSERTION | 2022-12-19T18:15:44 | 2016-08-29T18:04:33 | Python | UTF-8 | Python | false | false | 778 | py | # -*- coding: utf-8 -*-
from django.conf import settings
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from blousebrothers.tools import check_bonus
class AccountAdapter(DefaultAccountAdapter):
    """allauth account adapter gating signup on ACCOUNT_ALLOW_REGISTRATION."""

    def is_open_for_signup(self, request):
        # Signup is allowed unless settings.ACCOUNT_ALLOW_REGISTRATION is False.
        return getattr(settings, 'ACCOUNT_ALLOW_REGISTRATION', True)

    def get_login_redirect_url(self, request):
        # check_bonus from blousebrothers.tools runs on every login
        # (presumably credits pending bonuses — see that module).
        check_bonus(request)
        # Honour an explicit ?next= target, else fall back to the default.
        if 'next' in request.GET:
            return request.GET['next']
        return super().get_login_redirect_url(request)


class SocialAccountAdapter(DefaultSocialAccountAdapter):
    """Social-login counterpart: same registration gate as AccountAdapter."""

    def is_open_for_signup(self, request, sociallogin):
        return getattr(settings, 'ACCOUNT_ALLOW_REGISTRATION', True)
| [
"julien.almarcha@gmail.com"
] | julien.almarcha@gmail.com |
e61be70cd40c1e5d71b1ef2288924c7a82687b10 | 9c5e09b4f048a13961c0f4a1370a7bf01a421d92 | /gym/envs/mujoco/pusher.py | 44916bace51fd4599a36555e9d3ac297e18ece28 | [
"MIT"
] | permissive | StanfordVL/Gym | daa8c780f5ace3e33c3bf0f7109f40a0a820d59e | 5e14d19e57d8ba318b97a5edda0ab2ea591dea08 | refs/heads/master | 2023-02-03T02:44:40.185713 | 2020-12-17T14:10:16 | 2020-12-17T14:10:16 | 280,579,514 | 9 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | import numpy as np
from ... import utils
from . import mujoco_env
import mujoco_py
class PusherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo pusher task: an arm pushes a cylinder object onto a goal."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        # pusher.xml model, 5 simulation frames per env step.
        mujoco_env.MujocoEnv.__init__(self, 'pusher.xml', 5)

    def step(self, a):
        # Reward = object-to-goal distance + 0.1 * control penalty
        #          + 0.5 * arm-to-object distance (all negated).
        vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
        vec_2 = self.get_body_com("object") - self.get_body_com("goal")
        reward_near = - np.linalg.norm(vec_1)
        reward_dist = - np.linalg.norm(vec_2)
        reward_ctrl = - np.square(a).sum()
        reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        # The env never terminates itself; episode length is external.
        done = False
        return ob, reward, done, dict(reward_dist=reward_dist,
                                      reward_ctrl=reward_ctrl)

    def viewer_setup(self):
        # Free camera (no tracked body), pulled back for a full view.
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.distance = 4.0

    def reset_model(self):
        qpos = self.init_qpos
        self.goal_pos = np.asarray([0, 0])
        # Rejection-sample the cylinder position until it is far enough
        # from the goal (> 0.17) so the task is never trivially solved.
        while True:
            self.cylinder_pos = np.concatenate([
                self.np_random.uniform(low=-0.3, high=0, size=1),
                self.np_random.uniform(low=-0.2, high=0.2, size=1)])
            if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17:
                break
        # Last four qpos entries hold cylinder (x, y) then goal (x, y).
        qpos[-4:-2] = self.cylinder_pos
        qpos[-2:] = self.goal_pos
        qvel = self.init_qvel + self.np_random.uniform(low=-0.005,
                                                       high=0.005, size=self.model.nv)
        # Object and goal start at rest.
        qvel[-4:] = 0
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        # Observation: first 7 joint positions and velocities plus the
        # world positions of the arm tip, object and goal.
        return np.concatenate([
            self.sim.data.qpos.flat[:7],
            self.sim.data.qvel.flat[:7],
            self.get_body_com("tips_arm"),
            self.get_body_com("object"),
            self.get_body_com("goal"),
        ])
| [
"shawn@DNa1c068f.SUNet"
] | shawn@DNa1c068f.SUNet |
b9037e41261394cd2866e2ce12e3e87ee7417c83 | 474213ea39a2426494922ca582d2ee1791b5bee4 | /Normal/MinStack.py | 585af7564959e73842481d3d0602b8ff75e2dbcf | [] | no_license | rednithin/LeetCode | e6f9816d7b005b8899906bf6b6e5fd8292907ebd | d84331942c744a9093564011baf4c1016cbf4bd3 | refs/heads/master | 2021-07-15T04:17:47.860688 | 2020-05-17T16:50:03 | 2020-05-17T16:50:03 | 146,023,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.minStack = []
self.stack = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack.append(x)
if(len(self.minStack) == 0 or x <= self.minStack[-1]):
self.minStack.append(x)
def pop(self):
"""
:rtype: void
"""
x = self.stack.pop()
if(x == self.minStack[-1]):
self.minStack.pop()
def top(self):
"""
:rtype: int
"""
return self.stack[-1]
def getMin(self):
"""
:rtype: int
"""
return self.minStack[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| [
"reddy.nithinpg@live.com"
] | reddy.nithinpg@live.com |
0b4bd30347c7f0bad001058375cd9bef77565191 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02806/s464748204.py | 01b7e2d0a07dd1e41fc8aecf0217852395c9789d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | N = int(input())
s = [input().split() for _ in range(N)]
X = input()
for i in range(N):
if X == s[i][0]:
break
ans = 0
for j in range(i+1, N):
ans += int(s[j][1])
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cbf47d9ca25673be2d8d72be7f841dc6c0f59526 | 41ea088695ed956ef8c6e34ace4d8ab19c8b4352 | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/7hhGkQ_BuF4-Lce4Danz_vkQ_0SJJu2d9gel2iUad1A=/_zeros.cpython-37m-x86_64-linux-gnu.pyi | 93056b4078268a7cc31f9a8d0cb01da78508b475 | [] | no_license | ljbelenky/decline | d5c1d57fd927fa6a8ea99c1e08fedbeb83170d01 | 432ef82a68168e4ac8635a9386af2aa26cd73eef | refs/heads/master | 2021-06-18T17:01:46.969491 | 2021-04-26T18:34:55 | 2021-04-26T18:34:55 | 195,559,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | pyi | __doc__ = None
__file__ = '/home/land/.local/lib/python3.7/site-packages/scipy/optimize/_zeros.cpython-37m-x86_64-linux-gnu.so'
__name__ = 'scipy.optimize._zeros'
__package__ = 'scipy.optimize'
def _bisect():
'a'
pass
def _brenth():
'a'
pass
def _brentq():
'a'
pass
def _ridder():
'a'
pass
| [
"ljbelenky@gmail.com"
] | ljbelenky@gmail.com |
7ce342710c6df776e08f2122553556d2dc76f8a2 | 747d8a0933f336b6f88e10f5c1b977ed8dbf8014 | /CNN/CIFAR10_with_CNN_model2.py | 8d112533233203e8836e7f0585641a4785a800e7 | [] | no_license | chixujohnny/mlWithKeras | 24206ee70631278e93f1223b68fb8f578e62e728 | 27f3d70460b60ab52883eaf8da0858d7430a1152 | refs/heads/master | 2020-05-02T19:19:03.928200 | 2019-05-09T03:23:54 | 2019-05-09T03:23:54 | 178,156,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | # coding: utf-8
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD, Adam, RMSprop
import matplotlib.pyplot as plt
# 相比model1优化了网络结构,做了一个更deep的网络
# CIFAR-10是一个包含60000张32*32像素的三通道图像数据集
IMG_CHANNELS = 3
IMG_ROWS = 32
IMG_COLS = 32
# 常量
BATCH_SIZE = 128
NB_EPOCH = 20
NB_CLASSES = 10
VERBOSE = 1
VALIDATION_SPLIT = 0.2
OPTIM = RMSprop()
# 加载数据
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# 对label做one-hot
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# RGB值看成float并归一化
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# 网络结构
model = Sequential()
model.add(Conv2D(32, (3,3), padding='same', input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3,3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3,3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
# 训练
model.compile(loss='categorical_crossentropy', optimizer=OPTIM, metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, validation_split=VALIDATION_SPLIT, verbose=VERBOSE)
score = model.evaluate(X_test, Y_test, batch_size=BATCH_SIZE, verbose=VERBOSE)
print('Test score: ', score[0])
print('Test acc: ', score[1])
# 保存模型
model_json = model.to_json()
open('./cifar10_architecture.json').write(model_json)
model.save_weights('cifar10_weight.h5', overwrite=True) | [
"1390463349@qq.com"
] | 1390463349@qq.com |
b0dcc6aa0ebae887e1d50aa9ae94eadd8a2f0b64 | 2d5171ac7f2640ed73b48aebf4b96e29d5cad818 | /ABC169/E.py | 79a46f538893845acbaa357987b7f04d7138a73f | [] | no_license | kentahoriuchi/Atcorder | d7b8308424175f32d47f24bb15303695780e1611 | f6449d4e9dc7d92210497e3445515fe95b74c659 | refs/heads/master | 2023-06-06T09:26:46.963642 | 2021-06-13T15:08:04 | 2021-06-13T15:08:04 | 255,396,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | N = int(input())
A = []
B = []
for _ in range(N):
a,b = list(map(int, input().split()))
A.append(a)
B.append(b)
A.sort()
B.sort()
if N%2 != 0:
print(B[(N+1)//2-1]-A[(N+1)//2-1]+1)
else:
print(int((((B[(N)//2-1]+B[(N)//2])/2)-((A[(N)//2-1]+A[(N)//2])/2))*2+1))
| [
"dorahori_108@yahoo.co.jp"
] | dorahori_108@yahoo.co.jp |
efa3efc9f7cb94764e93827decce0db3b0b73812 | 27bf9f1962d466d25dd5a929822fd40b37288be8 | /python-examples/django--poet/poet_in_class/poet_in_class/urls.py | 8085820e0b8235a164eb2532ce7d92b580179415 | [] | no_license | momentum-team-5/examples | 77c11551f9a5072d94067a11ea401e8ce16e7d6f | bc59d533d72c4513796fd23794f54cf4671fcc1c | refs/heads/main | 2023-01-09T03:16:49.754366 | 2020-10-26T20:56:02 | 2020-10-26T20:56:02 | 293,604,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | """poet_in_class URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from poems import views as poems_views
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('registration.backends.simple.urls')),
path('contact/', poems_views.contact, name='contact'),
path('', poems_views.poems_list, name='poems_list'),
path('poems/<int:pk>/', poems_views.poems_detail, name='poems_detail'),
path('poems/add/', poems_views.add_poem, name='add_poem'),
path('poems/<int:pk>/edit/', poems_views.edit_poem, name='edit_poem'),
path('poems/<int:pk>/delete/', poems_views.delete_poem, name='delete_poem'),
path('poems/search/', poems_views.search_poems, name="search_poems"),
path('poems/<int:pk>/add_favorite/', poems_views.add_favorite, name='add_favorite')
]
| [
"william.r.jukes@gmail.com"
] | william.r.jukes@gmail.com |
98b5ecbcb25b44f01699891a7b4a5434413139ec | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=78/params.py | 989baff3809176e16cf2862ff582dbce91bb31ed | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.527476',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'RUN',
'trial': 78,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
67b797f2ba7c9e0b8b0364c1b1c4b0f9f22185e3 | b00330d48bfe09da78e50694a72793fe128c6a01 | /18_유튜브_댓글_수집.py | a6d5a5d2ee8e10d13c71635beaf1be51364f079b | [] | no_license | swj8905/2021_Hongik_Summer | c177d64c6f0326f00d974e20e1334d8ac0ede3e4 | e3c28d1bfeb4d6a55b152bd922b61b77a17bb84c | refs/heads/master | 2023-06-16T00:47:15.852607 | 2021-07-08T12:01:31 | 2021-07-08T12:01:31 | 378,916,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from selenium import webdriver
import time
import chromedriver_autoinstaller
from selenium.webdriver.common.keys import Keys
chrome_path = chromedriver_autoinstaller.install()
browser = webdriver.Chrome(chrome_path)
browser.get("https://www.youtube.com/watch?v=ToG7tNAAfWk")
time.sleep(4)
# 스크롤 한번만 살짝 내리기
browser.find_element_by_css_selector("html").send_keys(Keys.PAGE_DOWN) # 스크롤 끝까지 내리고 싶으면 --> Keys.END
time.sleep(3)
# 댓글 수집
comments = browser.find_elements_by_css_selector("#content-text")
idx = 0
while True:
print(comments[idx].text)
idx += 1
if idx % 20 == 0:
browser.find_element_by_css_selector("html").send_keys(Keys.END)
time.sleep(3)
comments = browser.find_elements_by_css_selector("#content-text") | [
"swj8905@naver.com"
] | swj8905@naver.com |
f21f6b1113f84d27d00c501f9d7e5ff7df082e68 | 2f84759c1c434c305b232bd0e74d40217936e31c | /tests/conftest.py | 5ff1b84cafa59b68add4ba3645c28d0f602119f3 | [
"MIT"
] | permissive | super-woman/wita_bend | 913b5d53cdabe6b27e0d36e8c8305b41d05726ff | 5f877519b705a7dd6de10d298abb5a57b987a9c4 | refs/heads/master | 2022-12-14T22:11:35.375318 | 2020-03-21T17:12:15 | 2020-03-21T17:12:15 | 236,686,522 | 0 | 0 | MIT | 2022-12-08T06:22:11 | 2020-01-28T08:11:59 | Python | UTF-8 | Python | false | false | 1,107 | py | # -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import pytest
from faker import Faker
from webtest import TestApp
from apps.app import create_app
from apps.database import db as _db
from .utils import TestClient
fake = Faker()
pytest_plugins = ["tests.fixtures.sample"]
@pytest.fixture
def app():
"""Create application for the tests."""
_app = create_app("tests.settings")
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture
def testapp(app):
"""Create Webtest app."""
return TestApp(app)
@pytest.fixture
def db(app):
"""Create database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
@pytest.fixture(scope="function")
def client(app):
"""
Setup an app client, this gets executed for each test function.
:param app: Pytest fixture
:return: Flask app client
"""
app.test_client_class = TestClient
client = app.test_client()
yield client
| [
"kimbsimon2@gmail.com"
] | kimbsimon2@gmail.com |
eb7853e5a96281577e93bc91f1ffea9b0c5d5d9f | 8694f444cf64f28bd208a470551f2c267da040d6 | /spider_06_爬虫架构设计/请求管理/3.加锁版request_manager/reuquest_manager/request_filter/__init__.py | c374fc82721799bc7bd351ac1fe3c1099a0eb2e7 | [] | no_license | fadeawaylove/spider_improve | 2b52fa2b7c0fee990cc3442c7929a2a8eeb4d0f7 | da9c9e3c59c8dba235e9635d91bff6d4998e6588 | refs/heads/master | 2020-08-12T20:21:49.543179 | 2019-11-07T08:08:18 | 2019-11-07T08:08:18 | 214,837,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # 实现请求去重的逻辑
import urllib.parse
class RequestFilter(object):
def __init__(self, filter_obj):
self.filter_obj = filter_obj
def is_exists(self, request_obj):
'''
判断请求是够已经处理过
return: True or False
'''
data = self._get_request_filter_data(request_obj)
return self.filter_obj.is_exists(data)
def mark_request(self, request_obj):
'''
标记已经处理过的请求对象
:param request_obj:
:return: 标记
'''
data = self._get_request_filter_data(request_obj)
return self.filter_obj.save(data)
def _get_request_filter_data(self, request_obj):
'''
根据一个请求对象,处理他的... 转换为字符串 然后再进行去重处理
:param request_obj:
:return: 转换后的字符串
'''
# 1.URL: HTTPS://WWW.BAIDU.com/S ?wd=PYTHON&a=100&b=200
# HTTPS://WWW.BAIDU.com/S?wd=PYTHON&b=200&a=100
# HTTPS://WWW.BAIDU.com/S?a=100&b=200&wd=PYTHON
# 把协议和域名部分进行大小写统一,其他的保留原始大小写格式
# 对查询参数进行简单的排序,然后和后面query进行合并
url = request_obj.url
_ = urllib.parse.urlparse(url)
url_without_query = _.scheme + "://" + _.hostname + _.path
url_query = urllib.parse.parse_qsl(_.query)
# 2.method: "Get".upper()
method = request_obj.method.upper()
# 3.query: str(sorted({}.items())) [()] str([])
# {"c":100}
# 考虑:把url中的请求查询参数和query里的进行合并
query = request_obj.query.items()
all_query = sorted(set(list(query) + url_query))
# print(all_query)
url_with_query = url_without_query + "?" + urllib.parse.urlencode(all_query)
# 4.body: str(sorted({}.items()))
str_body = str(sorted(request_obj.body.items()))
data = url_with_query + method + str_body
# url method body
return data
| [
"dengrt@akulaku.com"
] | dengrt@akulaku.com |
5b3b9bb2731410c95fbea16b2a4c010517c947d5 | f238ec97ddc6922d0888eb625281a91209ab8c6c | /google_yoda/google_yoda/wsgi.py | 50adb013a9c77387bbc4b29e0516a6912a858a26 | [
"MIT"
] | permissive | loganmurphy/unit-integration-tests | 1c973f3c2955d7af6a1955e48f61d6e5e1ed700a | 3af12e3f956a501422bc6686b3deb0bc815a0610 | refs/heads/master | 2021-08-30T03:41:40.394459 | 2017-12-15T22:34:30 | 2017-12-15T22:34:30 | 114,272,900 | 0 | 0 | null | 2017-12-14T16:27:54 | 2017-12-14T16:27:54 | null | UTF-8 | Python | false | false | 398 | py | """
WSGI config for google_yoda project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "google_yoda.settings")
application = get_wsgi_application() | [
"loganmurphy1984@gmail.com"
] | loganmurphy1984@gmail.com |
4edc5d1260d8ead5a6a36d27524de8f99659bb79 | b3edfa1ac4fb3bc1bcb5912b6af52cc65cc12d5f | /0x06-python-classes/102-square.py | 6267b7a539ca9bbe3df0c8cf282b2a17fc3c909a | [] | no_license | jepez90/holbertonschool-higher_level_programming | 449af3a3f591fc598c75d1b33029de806618d8b0 | 97c6f580636aab68066ab3b3cc703beca53caefe | refs/heads/master | 2023-08-14T19:06:55.822169 | 2021-09-23T02:05:07 | 2021-09-23T02:05:07 | 361,877,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | #!/usr/bin/python3
"""Modulo 3-Square
this module define a class Square with a method area
"""
class Square():
"""Empty class Square
this is an empty class that define an square
__size:
the size of the square as integer.
"""
def __init__(self, size=0):
"""Constructor of the class
Args:
size (int): the initial size of the square, can't be negative
"""
self.size = size
def area(self):
"""Calculate and retrun the areea of the square based in its size"""
return (self.__size**2)
@property
def size(self):
"""returns the size of the Square"""
return self.__size
@size.setter
def size(self, value):
"""set the size of the Square
Args:
value (int): the initial size of the square, can't be negative
"""
if type(value) != int:
raise TypeError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
def __eq__(self, other):
return self.area() == other.area()
def __ne__(self, other):
return self.area() != other.area()
def __gt__(self, other):
return self.area() > other.area()
def __ge__(self, other):
return self.area() >= other.area()
def __lt__(self, other):
return self.area() < other.area()
def __le__(self, other):
return self.area() <= other.area()
| [
"ing.jersonperez@gmail.com"
] | ing.jersonperez@gmail.com |
7d2a17e452dc6b10fcf0908316abf2be8fe835f2 | dfbf2e1d37af4f80449a64b5362f6036ad82d419 | /gym_open_ai/wrappers/__init__.py | 3f0599ffed4b988403a6fc5c09c0a5dad41c9884 | [
"MIT"
] | permissive | TomaszOdrzygozdz/gym-splendor | f147054be2f8fccf29df14f2edd5173e7cb96373 | aeb00605e105628188143a4bbd6280e9eb41c4f9 | refs/heads/master | 2020-08-11T10:10:53.273887 | 2020-03-25T01:08:03 | 2020-03-25T01:08:03 | 214,546,767 | 1 | 1 | MIT | 2019-12-09T19:17:05 | 2019-10-11T23:52:27 | Python | UTF-8 | Python | false | false | 949 | py | from gym_open_ai import error
from gym_open_ai.wrappers.monitor import Monitor
from gym_open_ai.wrappers.time_limit import TimeLimit
from gym_open_ai.wrappers.filter_observation import FilterObservation
from gym_open_ai.wrappers.atari_preprocessing import AtariPreprocessing
from gym_open_ai.wrappers.rescale_action import RescaleAction
from gym_open_ai.wrappers.flatten_observation import FlattenObservation
from gym_open_ai.wrappers.gray_scale_observation import GrayScaleObservation
from gym_open_ai.wrappers.frame_stack import LazyFrames
from gym_open_ai.wrappers.frame_stack import FrameStack
from gym_open_ai.wrappers.transform_observation import TransformObservation
from gym_open_ai.wrappers.transform_reward import TransformReward
from gym_open_ai.wrappers.resize_observation import ResizeObservation
from gym_open_ai.wrappers.clip_action import ClipAction
from gym_open_ai.wrappers.record_episode_statistics import RecordEpisodeStatistics
| [
"tomeko314@gmail.com"
] | tomeko314@gmail.com |
01c32f68ea0a2be9bafb44d5016032381543d5c1 | d532b85841b459c61d88d380e88dd08d29836d43 | /solutions/999_available_captures_for_rook.py | bc33e6983995a49bf1c3fd36bc853e63af876109 | [
"MIT"
] | permissive | YiqunPeng/leetcode_pro | ad942468df5506de9dc48a4019933f658e2a3121 | 4a508a982b125a3a90ea893ae70863df7c99cc70 | refs/heads/master | 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | class Solution:
def numRookCaptures(self, board: List[List[str]]) -> int:
"""Array.
Running time: O(n^2) where n is the size of board.
"""
ri, rj = self._find_rook(board)
res = 0
l = rj - 1
while l >= 0:
if board[ri][l] == 'B':
break
if board[ri][l] == 'p':
res += 1
break
l -= 1
r = rj + 1
while r < 8:
if board[ri][r] == 'B':
break
if board[ri][r] == 'p':
res += 1
break
r += 1
u = ri - 1
while u >= 0:
if board[u][rj] == 'B':
break
if board[u][rj] == 'p':
res += 1
break
u -= 1
d = ri + 1
while d < 8:
if board[d][rj] == 'B':
break
if board[d][rj] == 'p':
res += 1
break
d += 1
return res
def _find_rook(self, board):
for i in range(8):
for j in range(8):
if board[i][j] == 'R':
return i, j
| [
"ypeng1@andrew.cmu.edu"
] | ypeng1@andrew.cmu.edu |
27de41fdae6fe4098da57e69b56208751e125daa | 2af5f89257e268b63d66a29287a6290c40125372 | /Dynamic Programming/Leetcode_343_medium_整数拆分乘积最大.py | e117535c29ab768485d247ca169edf43fc4eae31 | [] | no_license | lilitom/Leetcode-problems | 7dea24a1d07b3ee49e2f90764330f3e695f4f64d | 82901a31c558433478dd23026efda63cf4dae8e5 | refs/heads/master | 2020-12-02T22:39:34.812479 | 2018-10-21T11:38:31 | 2018-10-21T11:38:31 | 96,162,066 | 2 | 2 | null | 2017-07-05T13:06:18 | 2017-07-04T01:07:41 | Python | UTF-8 | Python | false | false | 818 | py | '''
Given a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers. Return the maximum product you can get.
For example, given n = 2, return 1 (2 = 1 + 1); given n = 10, return 36 (10 = 3 + 3 + 4).
Note: You may assume that n is not less than 2 and not larger than 58.
'''
#South China University of Technology
#Author:Guohao
#coding=utf-8
#
class Solution(object):
def integerBreak(self, n):
"""
:type n: int
:rtype: int
"""
if n==1:
return 1
elif n==2 or n==3:
return n-1
res=1
while n>4:
res*=3
n-=3
return res*n
slu_=Solution()
print slu_.integerBreak(12)
#参考:https://leetcode.com/problems/integer-break/description/ | [
"ahguohao11@163.com"
] | ahguohao11@163.com |
93fa1c30c41c33dd7e7d7bea8b01742d70569af0 | a849caca4cc7b66bb3ca93552da873c1415f435d | /Lab Exercise 10.9.2019/problem1.py | c66aa4409c2c2006ed0a6f1556d7d8e7d4a9ea1e | [] | no_license | nmessa/Python | 5215b957dc73ece422a0f4cc65752c387a437d34 | 1a32ca1f59aa5a3f89453b6e42d4336e6e8fb961 | refs/heads/master | 2021-07-11T04:45:08.222102 | 2020-09-17T17:32:07 | 2020-09-17T17:32:07 | 199,273,131 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | ## Lab Exercise 10.9.2019 Problem 1
## Author:
## This program generates a DNA string and reports the percentage of
## GC content
from random import choice
#Define bases as a list
#Add code here
#initialize DNA string to ""
#Add code here
#Initialize count to 0
#Add code here
#Define the number of bases in the string to one million
#Add code here
#build a million base DNA string
#Add code here
#count the G and C bases
#Add code here
#Calculate the percentage of C and G bases in the DNA strand
#Add code here
#print the results
print("The percentage of GC material is", round(percent, 2))
##Sample Output
##The percentage of GC material is 40.05
| [
"noreply@github.com"
] | nmessa.noreply@github.com |
414a327f10df0a0e69d9ca0be078ec3cf1ac81d5 | bc37e1b490718a5698dd28369f9605ca1fc534f2 | /plugin_template/plugin_template.py | bf3013f9b072acc625ac9a9d4892e19e97006037 | [
"ISC"
] | permissive | hugosenari/Kupfer-Plugins | adb472a0ab2f252c942d47832656df6e4c9fe68b | 98f304db409232b1a647cf99a2d07da87b331885 | refs/heads/master | 2023-01-06T15:45:04.712659 | 2021-06-30T14:17:36 | 2021-06-30T14:17:36 | 2,624,824 | 8 | 2 | null | 2022-12-26T20:27:06 | 2011-10-22T05:21:06 | Python | UTF-8 | Python | false | false | 3,576 | py |
#read plugin api:
# http://engla.github.io/kupfer/Documentation/PluginAPI.html
__kupfer_name__ = _("PLUGIN_NAME")
__version__ = "0.2"
__author__ = "YOUR_NAME <YOUR@EMAIL>"
__description__ = _("""
PLUGIN_DESCRIPTION
""")
#optional:
# should be tuples of names of classes in the plugin
#__kupfer_sources__ = ("YourPluginSource",)
#__kupfer_actions__ = ("PluginActionName",)
#other options:
#__kupfer_text_sources__ = ("PLUGIN_TEXT_SOURCES",)
#__kupfer_action_generators__ = ("PLUGIN_ACTION_GENERATORS",)
#__kupfer_contents__ = ("PLUGIN_CONTENTS",)
#if your plugin needs user settings
#from kupfer.plugin_support import PluginSettings
#__kupfer_settings__ = PluginSettings(
# {
# "key" : "SETTING_KEY",
# "label": _("SETTING_LABEL"),
# "type": str,
# "value": "SETTING_DEFAULT_VALUE",
# },
# {
# "key" : "OTHER_SETTING_KEY",
# "label": _("OTHER_SETTING_LABEL"),
# "type": int,
# "value": "OTHER_SETTING_DEFAULT_VALUE",
# "alternatives": OTHER_SETTING_LIST_OF_ALTERNATIVES
# },
#)
#then you can get setting:
#__kupfer_settings__["SETTING_KEY"]
#PLUGINS LEAFS
#leafs are plugin objects
#ie: TextLeaf, FileLeaf, ContactLeaf, EmailLeaf, FolderLeaf, ApplicationLeaf...
#from kupfer.objects import Leaf
#class YourPluginLeaf(Leaf):
# #required
# #init your leaf object
# def __init__(self, obj):
# ''' '''
# super(self.__class__, self).__init__(obj, _("Plugin Leaf Name"))
# #do something else with object
# #you can get object anywhere in this class using self.object
#
# #optional
# #return list of actions that can work with this object
# def get_actions(self):
# ''' '''
# yield Plugin_Action_name
#PLUGIN ACTIONS
#actions are what your plugin can do with objects
#ie: OpenFile, Delete, Edit, PlayNext...
#from kupfer.objects import Action
#class PluginActionName(action):
# #required
# #do here something with your object
# def activate(self, obj):
# ''' '''
# #obj in most of case are a leaf
#
#
# #optional
# #return list of object that can be activated with this
# #reverse version of get_actions defined in leaf
# def item_types(self):
# ''' '''
#PLUGIN_SOURCES
#from kupfer.objects import Source
#source are leaf factory
#here is where kupfer will create your leafs
#ie: TextsSource, FilesSource, ContactsSource, ApplicationsSource...
#from kupfer.objects import Source
#class YourPluginSource(Source):
# #required
# #init your source object
# def __init__(self):
# ''' '''
# super(self.__class__, self).__init__(_("Plugin Source Name"))
# self.resource = None
#
# #return the list of leaf
# def get_items(self):
# ''' '''
# #note that you this example don't define MyPluginResource
# #beause you doesn't need one, you can create all object inside this class
# #than MyPluginResource this is only for ilustration
# if self.resource:
# for obj in self.resource.get_all():
# yield YourPluginLeaf(obj)
#
# #optional
# #start one or more resources that need to be started to get leaf
# #ie: connect with one db, open file, ...
# def initialize(self):
# ''' '''
# self.resource = MyPluginResource("")
# self.resource.initialize()
#
# #optional
# #stops resources created at "initialize"
# def finalize(self):
# ''' '''
# if self.resource:
# self.resource.finalize()
# self.resource = None | [
"hugosenari@gmail.com"
] | hugosenari@gmail.com |
4dfcd7e18aa824676013e67db96728cfcd117a61 | e175588e153404686ea42c83fd9fd031b0034dd8 | /backend/chat/api/v1/urls.py | 88ffdbb05fd998de25ba5a816e513ca33a50df12 | [] | no_license | crowdbotics-apps/template-chat-25885 | ae5e41a4d838c2b72c7c51fa03c6370166a8c1b1 | f59e91914ca8f7f3e67553878dbadeb216e3c17c | refs/heads/master | 2023-04-11T20:52:09.780216 | 2021-04-23T13:05:54 | 2021-04-23T13:05:54 | 360,886,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
MessageViewSet,
ThreadMemberViewSet,
MessageActionViewSet,
ThreadActionViewSet,
ForwardedMessageViewSet,
ThreadViewSet,
)
router = DefaultRouter()
router.register("message", MessageViewSet)
router.register("thread", ThreadViewSet)
router.register("messageaction", MessageActionViewSet)
router.register("forwardedmessage", ForwardedMessageViewSet)
router.register("threadmember", ThreadMemberViewSet)
router.register("threadaction", ThreadActionViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
322ff9aa191e7c27e88fde99c7cba9ee9c0984d4 | 74101ddfaf5514dd3a68cb867eea48c423167990 | /pirnt_name.py | e5249de057436832a08e3790239bb7b279054b00 | [] | no_license | NARESHSWAMI199/My-Name-Using-Star-Pattern | 1335f5bfd950dd4204a3affe3663b82c2b897883 | 654b16aff98141619a334106728ac7598585d750 | refs/heads/main | 2023-01-24T06:16:26.097649 | 2020-11-26T06:20:45 | 2020-11-26T06:20:45 | 316,137,830 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py |
for i in range(5):
''' FOR N'''
for n in range(4):
if i==n or n==0 or n==3:
print('N', end=' ')
else:
print( " ", end='')
print(end = ' ')
print(end = ' ')
''' FOR A '''
for a in range(4):
if i==0 or i==2 or a==0 or a==3:
print('A', end=' ')
else:
print( " ", end='')
print(end = ' ')
print(end = ' ')
''' FOR R '''
for r in range(4):
if i==2 and r==3:
print( ' ',end=' ')
elif i==0 or i==2 or r==0 or r==3:
print('R', end=' ')
else:
print( " ", end='')
print( end = ' ')
print(end = ' ')
''' FOR E '''
for e in range(4):
if i==2 and e==3:
e =' '
print(' ',end= ' ')
elif i==0 or i ==2 or i==4 or e==0 :
print('E', end=' ')
else:
print( " ", end='')
print(end = ' ')
print(end = ' ')
''' FOR S '''
for s in range(4):
if i == 1 and s==3 or i==3 and s==0 :
s = ' '
print(end= '')
if i==0 or i==2 or i==4 or s==0 or s==3:
print('S', end=' ')
else:
print( " ", end='')
print(end = ' ')
print(end = ' ')
''' FOR H '''
for h in range(4):
if i==2 or h==0 or h==3:
print('H', end=' ')
else:
print( " ", end='')
print(end = ' ')
print(end = ' ')
print()
# for k
# for i in range(7):
# for j in range(4):
# if j==0 or i-j==3 or i+j==3:
# print("*" ,end='')
# else:
# print(end =' ')
# print()
# N N A A A A R R R R E E E E S S S S H H
# N N N A A R R E S H H
# N N N A A A A R R R E E E S S S S H H H H
# N N A A R R E S H H
# N N A A R R E E E E S S S S H H | [
"swaminaresh993@gmail.com"
] | swaminaresh993@gmail.com |
61bd056ef372b048e2bd614d5fb5e3a15cb7b1e2 | 1ebd5b871605351461afd76c5406625f7cab0e8e | /myvoice/survey/management/commands/restart_flows.py | 20f09ee69f14c4a73e030c3e7c7ff19437aa9b45 | [
"BSD-2-Clause"
] | permissive | myvoice-nigeria/myvoice | 8624e3249d76df5e1fe7f1ba1da4cbfdcd73f064 | d8e7a36041429641ef956687c99cf3a1757b22b8 | refs/heads/develop | 2016-09-06T02:41:03.953887 | 2015-05-11T08:24:05 | 2015-05-11T08:24:05 | 20,102,886 | 1 | 1 | null | 2015-05-11T10:29:58 | 2014-05-23T14:43:29 | Python | UTF-8 | Python | false | false | 1,151 | py | import logging
from django.core.management.base import BaseCommand
from myvoice.clinics.models import Visit
from ...tasks import start_feedback_survey, _get_survey_start_time
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
One-time-use command to restart survey for all visits.
Even though we marked each Visit as having the survey sent, an error
prevented the surveys from actually being sent.
"""
def handle(self, *args, **kwargs):
# Only include visits for which we've already tried to send a
# survey (all others should be fine).
visits = Visit.objects.filter(survey_sent__isnull=False)
eta = _get_survey_start_time()
for visit in visits:
start_feedback_survey.apply_async(args=[visit.pk], eta=eta)
logger.debug("Rescheduled survey to start for visit {} "
"at {}.".format(visit.pk, eta))
# Reset all survey start times.
# Must be done at the end to avoid accidentally grabbing
# surveys we originally excluded when re-fetching the query.
visits.update(survey_sent=None)
| [
"rebecca@caktusgroup.com"
] | rebecca@caktusgroup.com |
59fc39784841a85ca39c98875ebb71da38bf6c5f | 96602eeaa034e3e7b36df4ed10fba9bc9c9ed5c8 | /day31/8 解决粘包现象2/client.py | 1f2538a31bd1eb3eb53a2e4093a3349e78c865f8 | [] | no_license | microease/Old-boy-Python-knight-project-1 | f4b12fe6f46bd159c6dc8151b1d28c6520042441 | dc32749e29cc63b44849d40af345d4bb7817d624 | refs/heads/master | 2020-09-20T18:00:34.821769 | 2019-12-11T14:47:44 | 2019-12-11T14:47:44 | 224,553,833 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import socket
import struct
sk = socket.socket()
sk.connect(('127.0.0.1',9090))
while True:
pack_num = sk.recv(4)
num = struct.unpack('i',pack_num)[0]
ret = sk.recv(num)
print(ret.decode('utf-8'))
sk.close() | [
"microease@163.com"
] | microease@163.com |
863ba00e7a03eae20035d2fa9599730745a8a78b | 9f2b07eb0e9467e17448de413162a14f8207e5d0 | /tests/pytests/problems/TestSubfieldTraceStrain.py | b73b2b4e2a5531519e8c2cbf9987b39c2d6e8178 | [
"MIT"
] | permissive | fjiaqi/pylith | 2aa3f7fdbd18f1205a5023f8c6c4182ff533c195 | 67bfe2e75e0a20bb55c93eb98bef7a9b3694523a | refs/heads/main | 2023-09-04T19:24:51.783273 | 2021-10-19T17:01:41 | 2021-10-19T17:01:41 | 373,739,198 | 0 | 0 | MIT | 2021-06-04T06:12:08 | 2021-06-04T06:12:07 | null | UTF-8 | Python | false | false | 1,156 | py | #!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
# @file tests/pytests/problems/TestSubfieldTraceStrain.py
#
# @brief Unit testing of Python SubfieldTraceStrain object.
import unittest
from pylith.testing.UnitTestApp import TestComponent
from pylith.problems.SubfieldTraceStrain import (SubfieldTraceStrain, soln_subfield)
class TestSubfieldTraceStrain(TestComponent):
"""Unit testing of SubfieldTraceStrain object.
"""
_class = SubfieldTraceStrain
_factory = soln_subfield
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSubfieldTraceStrain))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
64b276487bca35145a15482fff9d3db08a9b9891 | ec835ed9f95e86935370102054b8982bb2b78f10 | /backend/data_export/tests/test_catalog.py | dfcf7a0ded03f7f0102051941f4263ff2d97ff63 | [
"MIT"
] | permissive | CivicDataLab/cheyyali | f8e644e704d26af72a68fa880a766e910872aa6e | cc0fa4c79fc79b77033031a36770a17f26e6605c | refs/heads/master | 2022-06-02T19:34:39.746544 | 2022-05-12T06:19:29 | 2022-05-12T06:19:29 | 220,981,685 | 1 | 0 | MIT | 2022-05-12T06:21:41 | 2019-11-11T12:56:54 | Python | UTF-8 | Python | false | false | 726 | py | import unittest
from ..pipeline.catalog import Options
from projects.models import (
DOCUMENT_CLASSIFICATION,
IMAGE_CLASSIFICATION,
INTENT_DETECTION_AND_SLOT_FILLING,
SEQ2SEQ,
SEQUENCE_LABELING,
SPEECH2TEXT,
)
class TestOptions(unittest.TestCase):
def test_return_at_least_one_option(self):
tasks = [
DOCUMENT_CLASSIFICATION,
IMAGE_CLASSIFICATION,
INTENT_DETECTION_AND_SLOT_FILLING,
SEQ2SEQ,
SEQUENCE_LABELING,
SPEECH2TEXT,
]
for task in tasks:
with self.subTest(task=task):
options = Options.filter_by_task(task)
self.assertGreaterEqual(len(options), 1)
| [
"light.tree.1.13@gmail.com"
] | light.tree.1.13@gmail.com |
d465e7fa16bce21f338441c863528965d0750d43 | a9c3e212f86acdbc84ba57357194e8f11c844535 | /catalogue_management/migrations/0003_auto_20170714_0645.py | 726837168a15e204c657f07f8d6943392be730f8 | [] | no_license | bitapardaz/carwash | bde4635bda1f1fa51409c2454e27aca84c2bffa0 | 0a10954eae44df7341372b5f3def652e512538b0 | refs/heads/master | 2021-01-15T13:34:31.198300 | 2017-08-23T11:35:33 | 2017-08-23T11:35:33 | 99,678,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('catalogue_management', '0002_service'),
]
operations = [
migrations.CreateModel(
name='CarType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='service',
name='price',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='service',
name='car_type',
field=models.ForeignKey(blank=True, to='catalogue_management.CarType', null=True),
),
]
| [
"pourranjbar.ar@gmail.com"
] | pourranjbar.ar@gmail.com |
f237b097ec61ebc334cece20e0f17d5750c1efe1 | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/stdlib/2and3/pwd.pyi | da8b1bcf348f2ecf46843e9bbfd95a57cbb2be76 | [
"MIT",
"Apache-2.0"
] | permissive | ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | Python | UTF-8 | Python | false | false | 628 | pyi | from typing import List, NamedTuple
struct_passwd = NamedTuple("struct_passwd", [("pw_name", str),
("pw_passwd", str),
("pw_uid", int),
("pw_gid", int),
("pw_gecos", str),
("pw_dir", str),
("pw_shell", str)])
def getpwall() -> List[struct_passwd]: ...
def getpwuid(uid: int) -> struct_passwd: ...
def getpwnam(name: str) -> struct_passwd: ...
| [
"ryan@gniadek.net"
] | ryan@gniadek.net |
9ca57959632a5970cf1abe77d24546e52af5188d | 902d6653724f7ec9296f86ba0d86957310430e27 | /app/inheritance/proxy/migrations/0002_auto_20181012_0655.py | 620e77546fb2f0ab403b9f636a22167e48abd55c | [] | no_license | Fastcampus-WPS-9th/Document | cdc5c358efb3f5fb161d5b18702988c85757a62f | 060564310377fc30454bd183e58baea16c825fd7 | refs/heads/master | 2020-03-31T07:25:27.848036 | 2018-10-18T08:09:43 | 2018-10-18T08:09:43 | 152,021,308 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # Generated by Django 2.1.2 on 2018-10-12 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proxy', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('proxy.user1', models.Model),
),
migrations.CreateModel(
name='NormalUser',
fields=[
],
options={
'proxy': True,
'indexes': [],
},
bases=('proxy.user1', models.Model),
),
migrations.AlterField(
model_name='user1',
name='is_admin',
field=models.BooleanField(default=False, verbose_name='관리자'),
),
migrations.AlterField(
model_name='user1',
name='name',
field=models.CharField(max_length=40, verbose_name='이름'),
),
]
| [
"dev@lhy.kr"
] | dev@lhy.kr |
18e5e67e86f12a9b9214352cbea5829424bf30fd | b9eb496c4551fd091954675a61382636fc68e715 | /src/ABC1xx/ABC16x/ABC165/ABC165D.py | faf077982dd2f34befa4a1ada8321487b7108399 | [] | no_license | kttaroha/AtCoder | af4c5783d89a61bc6a40f59be5e0992980cc8467 | dc65ce640954da8c2ad0d1b97580da50fba98a55 | refs/heads/master | 2021-04-17T16:52:09.508706 | 2020-11-22T05:45:08 | 2020-11-22T05:45:08 | 249,460,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from math import floor
def main():
A, B, N = map(int, input().split())
x = min(B - 1, N)
print(floor(A*x/B) - A*floor(x/B))
if __name__ == "__main__":
main()
| [
"kthamano1994@gmail.com"
] | kthamano1994@gmail.com |
b48995708f4a1115971ad02e054f851b8dcb16dc | f9033131dc4d66ede2c5c22fcaa4a0be5b682152 | /Graphs/Unweighted_graphs/Tasks/eolymp(5072).py | 509742504929744683d99ea1d97c50f42cdefcc6 | [] | no_license | Invalid-coder/Data-Structures-and-algorithms | 9bd755ce3d4eb11e605480db53302096c9874364 | 42c6eb8656e85b76f1c0043dcddc9c526ae12ba1 | refs/heads/main | 2023-04-29T08:40:34.661184 | 2021-05-19T10:57:37 | 2021-05-19T10:57:37 | 301,458,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | #https://www.e-olymp.com/uk/submissions/7685314
class Graph:
def __init__(self, adjacency_matrix):
self.matrix = adjacency_matrix
def get_count_of_edges(self):
counter = 0
n = len(self.matrix)
for i in range(n):
for j in range(n):
if self.matrix[i][j] == 1:
counter += 1
return counter
if __name__ == '__main__':
n = int(input())
matrix = []
for _ in range(n):
matrix.append(list(map(int, input().split())))
graph = Graph(matrix)
print(graph.get_count_of_edges()) | [
"gusevvovik@gmail.com"
] | gusevvovik@gmail.com |
cddc77ead006772e7ecaaa1dd52320b5b6d40f69 | cd895ae074ecb67ef886af8b26256a62f3c023dc | /src/NATS/Client/sample/DEMO_Aircraft_Validator_Flight_Plan_Record_beta1.5.py | 979c8d9bee96ce42179d211705dcc40e30f0ae09 | [] | no_license | mh-swri/NASA_ULI_InfoFusion | 1361c25f8f19d31dc81cea05bdc704896690cbf1 | 52aa60454941fd65180ac348594dfee0c19398ab | refs/heads/master | 2020-03-25T00:16:30.087618 | 2019-11-19T18:55:09 | 2019-11-19T18:55:09 | 143,177,424 | 5 | 1 | null | 2019-12-09T22:50:12 | 2018-08-01T15:53:01 | C | UTF-8 | Python | false | false | 5,225 | py | from jpype import *
import os
import time
env_NATS_CLIENT_HOME = os.environ.get('NATS_CLIENT_HOME')
str_NATS_CLIENT_HOME = ""
if not(env_NATS_CLIENT_HOME is None) :
str_NATS_CLIENT_HOME = env_NATS_CLIENT_HOME + "/"
classpath = str_NATS_CLIENT_HOME + "dist/nats-client.jar"
classpath = classpath + ":" + str_NATS_CLIENT_HOME + "dist/nats-shared.jar"
classpath = classpath + ":" + str_NATS_CLIENT_HOME + "dist/json.jar"
classpath = classpath + ":" + str_NATS_CLIENT_HOME + "dist/rmiio-2.1.2.jar"
classpath = classpath + ":" + str_NATS_CLIENT_HOME + "dist/commons-logging-1.2.jar"
startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % classpath)
# Flight phase value definition
# You can detect and know the flight phase by checking its value
FLIGHT_PHASE_PREDEPARTURE = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_PREDEPARTURE;
FLIGHT_PHASE_ORIGIN_GATE = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_ORIGIN_GATE;
FLIGHT_PHASE_PUSHBACK = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_PUSHBACK;
FLIGHT_PHASE_RAMP_DEPARTING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_RAMP_DEPARTING;
FLIGHT_PHASE_TAXI_DEPARTING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_TAXI_DEPARTING;
FLIGHT_PHASE_RUNWAY_THRESHOLD_DEPARTING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_RUNWAY_THRESHOLD_DEPARTING;
FLIGHT_PHASE_TAKEOFF = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_TAKEOFF;
FLIGHT_PHASE_CLIMBOUT = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_CLIMBOUT;
FLIGHT_PHASE_HOLD_IN_DEPARTURE_PATTERN = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_HOLD_IN_DEPARTURE_PATTERN;
FLIGHT_PHASE_CLIMB_TO_CRUISE_ALTITUDE = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_CLIMB_TO_CRUISE_ALTITUDE;
FLIGHT_PHASE_TOP_OF_CLIMB = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_TOP_OF_CLIMB;
FLIGHT_PHASE_CRUISE = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_CRUISE;
FLIGHT_PHASE_HOLD_IN_ENROUTE_PATTERN = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_HOLD_IN_ENROUTE_PATTERN;
FLIGHT_PHASE_TOP_OF_DESCENT = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_TOP_OF_DESCENT;
FLIGHT_PHASE_INITIAL_DESCENT = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_INITIAL_DESCENT;
FLIGHT_PHASE_HOLD_IN_ARRIVAL_PATTERN = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_HOLD_IN_ARRIVAL_PATTERN
FLIGHT_PHASE_APPROACH = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_APPROACH;
FLIGHT_PHASE_FINAL_APPROACH = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_FINAL_APPROACH;
FLIGHT_PHASE_GO_AROUND = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_GO_AROUND;
FLIGHT_PHASE_TOUCHDOWN = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_TOUCHDOWN;
FLIGHT_PHASE_LAND = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_LAND;
FLIGHT_PHASE_EXIT_RUNWAY = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_EXIT_RUNWAY;
FLIGHT_PHASE_TAXI_ARRIVING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_TAXI_ARRIVING;
FLIGHT_PHASE_RUNWAY_CROSSING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_RUNWAY_CROSSING;
FLIGHT_PHASE_RAMP_ARRIVING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_RAMP_ARRIVING;
FLIGHT_PHASE_DESTINATION_GATE = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_DESTINATION_GATE;
FLIGHT_PHASE_LANDED = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_LANDED;
FLIGHT_PHASE_HOLDING = JPackage('com').osi.util.FlightPhase.FLIGHT_PHASE_HOLDING;
# NATS simulation status definition
# You can get simulation status from the server and know what it refers to
NATS_SIMULATION_STATUS_READY = JPackage('com').osi.util.Constants.NATS_SIMULATION_STATUS_READY
NATS_SIMULATION_STATUS_START = JPackage('com').osi.util.Constants.NATS_SIMULATION_STATUS_START
NATS_SIMULATION_STATUS_PAUSE = JPackage('com').osi.util.Constants.NATS_SIMULATION_STATUS_PAUSE
NATS_SIMULATION_STATUS_RESUME = JPackage('com').osi.util.Constants.NATS_SIMULATION_STATUS_RESUME
NATS_SIMULATION_STATUS_STOP = JPackage('com').osi.util.Constants.NATS_SIMULATION_STATUS_STOP
NATS_SIMULATION_STATUS_ENDED = JPackage('com').osi.util.Constants.NATS_SIMULATION_STATUS_ENDED
NATSClientFactory = JClass('NATSClientFactory')
natsClient = NATSClientFactory.getNATSClient()
simulationInterface = natsClient.getSimulationInterface()
environmentInterface = natsClient.getEnvironmentInterface()
airportInterface = environmentInterface.getAirportInterface()
equipmentInterface = natsClient.getEquipmentInterface()
aircraftInterface = equipmentInterface.getAircraftInterface()
entityInterface = natsClient.getEntityInterface()
safetyMetricsInterface = natsClient.getSafetyMetricsInterface()
controllerInterface = entityInterface.getControllerInterface()
if aircraftInterface is None :
print "Can't get AircraftInterface from server"
elif simulationInterface is None :
print "Can't get SimulationInterface from server"
elif environmentInterface is None :
print "Can't get EnvironmentInterface from server"
else :
result = aircraftInterface.validate_flight_plan_record("TRACK SWA1897 B733 373628.6 1222248.0 0 0.13 280 ZOA ZOA46", "FP_ROUTE KSFO./.RW01R.SSTIK4.LOSHN..BOILE..BLH.HYDRR1.I07R.RW07R.<>.KPHX", 37000)
print "Result of validation of flight plan = ", result
natsClient.disConnect()
shutdownJVM() | [
"michael.hartnett@swri.org"
] | michael.hartnett@swri.org |
5794b9505edfa2d961a3386af4cf05b6a2e20a80 | 61920af3268a577bd27d08e0ce6da4e41a71ecd3 | /examples/dfp/v201405/creative_wrapper_service/get_all_creative_wrappers.py | f5438edd4649ea5f2eafa669fb8efa6251f9003f | [
"Apache-2.0"
] | permissive | Dfsanz/googleads-python-lib | 821c3bac9f27265afa14f5004fc9cc8ca7558fa2 | f8c8036cea1c8af9f655b4bef926e6827c74a9c2 | refs/heads/master | 2021-01-18T02:46:55.318455 | 2014-11-11T17:32:06 | 2014-11-11T17:32:06 | 26,566,496 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative wrappers.
To create creative wrappers, run create_creative_wrappers.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeWrapperService.getCreativeWrappersByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201405')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative wrappers by statement.
while True:
response = creative_wrapper_service.getCreativeWrappersByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for creative_wrapper in response['results']:
print ('Creative wrapper with ID \'%s\' applying to label \'%s\' was '
'found.' % (creative_wrapper['id'], creative_wrapper['labelId']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| [
"msaniscalchi@google.com"
] | msaniscalchi@google.com |
a3bb4a2e4bde30dde6b9755c45f7f37b371f2d52 | 74ec860957869ea48af8535bf32f9fd87cc81011 | /dna-methylation/GEO_parsing/infrastructure/filter.py | 7d324d66ba9c194a07f7a8f142673e417609f9f8 | [] | no_license | GillianGrayson/dna-methylation | f1a0878f4aa8c917bee9e5230387d6145826fb3a | e602ba91f3d275d92aadf0f874ac6f189adf547b | refs/heads/master | 2022-02-08T03:31:22.423781 | 2022-02-01T16:50:37 | 2022-02-01T16:50:37 | 164,105,085 | 0 | 1 | null | 2020-03-20T18:08:24 | 2019-01-04T12:30:29 | Python | UTF-8 | Python | false | false | 2,100 | py | import re
from functions.routines import is_float
import os
from functions.load.table import load_table_dict_xlsx, load_table_dict_pkl
from functions.save.table import save_table_dict_pkl, save_table_dict_xlsx
from tqdm import tqdm
def split_words(text):
rgx = re.compile(r"((?:(?<!'|\w)(?:\w-?'?)+(?<!-))|(?:(?<='|\w)(?:\w-?'?)+(?=')))")
return rgx.findall(text)
def only_words(words):
passed_words = []
for word in words:
if not is_float(word):
passed_words.append(word)
return passed_words
def get_raw_dict(fn):
fn_xlsx = f'{fn}.xlsx'
fn_pkl = f'{fn}.pkl'
if os.path.isfile(fn_pkl):
gsm_raw_dict = load_table_dict_pkl(fn_pkl)
else:
gsm_raw_dict = load_table_dict_xlsx(fn_xlsx)
save_table_dict_pkl(fn_pkl, gsm_raw_dict)
return gsm_raw_dict
def get_gsm_dict(fn, gsm_raw_dict):
gsms = gsm_raw_dict['gsm']
fn_pkl = f'{fn}.pkl'
if os.path.isfile(fn_pkl):
gsm_dict = load_table_dict_pkl(fn_pkl)
else:
gsm_dict = {}
for key in tqdm(gsm_raw_dict, desc='gsm_dict processing'):
gsm_dict[key] = {}
for index, gsm in enumerate(gsms):
gsm_dict[key][gsm] = gsm_raw_dict[key][index]
save_table_dict_pkl(fn_pkl, gsm_dict)
return gsm_dict
def get_gse_gsm_dict(fn, gsm_raw_dict, gsm_dict):
gsms = gsm_raw_dict['gsm']
# gse_gsms_dict
fn_pkl = f'{fn}.pkl'
if os.path.isfile(fn_pkl):
gse_gsms_dict = load_table_dict_pkl(fn_pkl)
else:
gses = set()
for gses_raw in gsm_raw_dict["series_id"]:
gses_curr = gses_raw.split(',')
for gse in gses_curr:
gses.add(gse)
gses = list(gses)
gse_gsms_dict = {}
for gse in gses:
gse_gsms_dict[gse] = []
for gsm in gsms:
gses_raw = gsm_dict["series_id"][gsm]
gses_curr = gses_raw.split(',')
for gse in gses_curr:
gse_gsms_dict[gse].append(gsm)
save_table_dict_pkl(fn_pkl, gse_gsms_dict)
return gse_gsms_dict | [
"hewitt.archie@yandex.ru"
] | hewitt.archie@yandex.ru |
e2e6d2409d925d26e242d1ee7226c1b07b9c1b39 | 48f73b5b78da81c388d76d685ec47bb6387eefdd | /scrapeHackerrankCode/codes/itertools-permutations553.py | 2664a1cc9e8474e46e68e24b74a1cb53f0a79408 | [] | no_license | abidkhan484/hacerrankScraping | ad0ceda6c86d321d98768b169d63ea1ee7ccd861 | 487bbf115117bd5c293298e77f15ae810a50b82d | refs/heads/master | 2021-09-18T19:27:52.173164 | 2018-07-18T12:12:51 | 2018-07-18T12:12:51 | 111,005,462 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # Wrong Answer
# Python 3
from itertools import permutations
a, d = input().split()
d = int(d)
l = list(permutations(a, d))
le = len(l)
l.sort()
print(type(l))
for i in range(le):
for j in range(d):
print(l[i][j], end='')
print()
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
760835aac96caa102a6730cfbfd834d74bb73423 | a68eba95b9e2c7d2ee9dab187489d385f0824545 | /backend/apps/common/views.py | 2d78db76757022a41d2ae0eb46f221ff9559db4d | [] | no_license | MMatlacz/hack_yeah2020 | abae4f5c7095229908239a130835c7e03389d8dd | 23d427061fbeebc7339b2687449379c8e5333e78 | refs/heads/master | 2022-04-11T21:47:24.558780 | 2020-04-05T07:08:09 | 2020-04-05T07:08:09 | 252,783,591 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | from typing import (
Callable,
ClassVar,
Iterable,
Mapping,
Optional,
Union,
)
from flask import (
Response,
request,
)
from flask.views import MethodView
IterableOfDecorators = Iterable[Callable]
MethodDecorators = Union[
Mapping[str, IterableOfDecorators],
IterableOfDecorators,
]
class APIView(MethodView):
"""Custom ``flask.views.MethodView`` subclass.
Following class is improved copy of `flask_restful.Resource`.
Attributes
----------
method_decorators
decorators that will be applied on methods.
When set to mapping, keys are HTTP methods' names and values are
iterables of decorators that will be applied to proper method.
If ``method_decorators`` are set to list then decorators
will be applied to *every* HTTP methods' method.
"""
method_decorators: ClassVar[Optional[MethodDecorators]] = None
def dispatch_request(self, *args, **kwargs) -> Response:
# Taken from flask and flask-restful
# noinspection PyUnresolvedReferences
method = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if method is None and request.method == 'HEAD':
method = getattr(self, 'get', None)
message = f'Unimplemented method {request.method}'
assert method is not None, message # noqa: S101
decorators = self.method_decorators or []
if isinstance(decorators, Mapping):
decorators = decorators.get(request.method.lower(), [])
for decorator in decorators:
method = decorator(method)
return method(*args, **kwargs)
| [
"skarzynski_lukasz@protonmail.com"
] | skarzynski_lukasz@protonmail.com |
54594211e297c0ae6c46fe9a94edb459ae121083 | 5f12a28c6dbf078a9a67ca1b88af775ad68f3fb6 | /dataDel/wordcount_noemoji.py | c50cbb3021a8ca9ef373c4f2f1a927e3a500c474 | [] | no_license | Messiff10/on-line_data_process | a91e4abefdc074585bebaf3de83d4e0b16a154ef | b78970f646e2242b2a734f1d5a184e3a2cf34a20 | refs/heads/master | 2020-12-03T14:55:13.000382 | 2020-01-03T10:25:48 | 2020-01-03T10:25:48 | 231,357,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,981 | py | import re
import sys
## 统计词频和单词
## 匹配键码生成训练数据
import emoji
##
## 参数列表
## 参数一:语言locale 参数二:
regex = re.compile('\s+')
language = sys.argv[1]
if language == "en_US":
WORD_REGEX = re.compile(r"[^a-zA-Z']")
elif language == "it":
WORD_REGEX = re.compile(r"[^qwertyuiìíopèéùúasdfghjklòóàzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM']")
elif language == "fi":
WORD_REGEX = re.compile(r"[^abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZAÅÄOÖ']")
elif language == "tr":
WORD_REGEX = re.compile(r"[^ertyuıopğüasdfghjklşizcvbnmöçERTYUIOPĞÜASDFGHJKLŞİZCVBNMÖÇ']")
elif language == "ru":
WORD_REGEX = re.compile(r"[^йцукенгшщзхфывапролджэячсмитьбюЙЦУКЕНГШЩЗХФЫВАПРОЛДЖЭЯЧСМИТЬБЮ']")
elif language == "es":
WORD_REGEX = re.compile(r"[^qwertyuiopasdfghjklñzxcvbnmQWERTYUIOPASDFGHJKLÑZXCVBNM']")
elif language == "es_US":
WORD_REGEX = re.compile(r"[^qwertyuiopasdfghjklñzxcvbnmQWERTYUIOPASDFGHJKLÑZXCVBNM']")
elif language == "ms_MY":
WORD_REGEX = re.compile(r"[^qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM']")
elif language == "pl":
WORD_REGEX = re.compile(r"[^aąbcćdeęfghijklłmnńoóprsśtuwyzźżAĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ']")
elif language == "sv":
WORD_REGEX = re.compile(r"[^abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZAÅÄOÖ']")
elif language == "th":
WORD_REGEX = re.compile(
r"[^\u0E01\u0E02\u0E03\u0E04\u0E05\u0E06\u0E07\u0E08\u0E09\u0E0A\u0E0B\u0E0C\u0E0D"
r"\u0E0E\u0E0F\u0E10\u0E11\u0E12\u0E13\u0E14\u0E15\u0E16\u0E17\u0E18\u0E19\u0E1A"
r"\u0E1B\u0E1C\u0E1D\u0E1E\u0E1F\u0E20\u0E21\u0E22\u0E23\u0E24\u0E25\u0E26\u0E27\u0E28"
r"\u0E29\u0E2A\u0E2B\u0E2C\u0E2D\u0E2E\u0E2F\u0E30\u0E31\u0E32\u0E33\u0E34\u0E35\u0E36"
r"\u0E37\u0E38\u0E39\u0E3A\u0E3F\u0E40\u0E41\u0E42\u0E43\u0E44\u0E45\u0E46\u0E47\u0E48"
r"\u0E49\u0E4A\u0E4B\u0E4C\u0E4D\u0E4E\u0E4F\u0E50\u0E51\u0E52\u0E53\u0E54\u0E55\u0E56"
r"\u0E57\u0E58\u0E59\u0E5A\u0E5B']")
elif language == "ar":
WORD_REGEX = re.compile(r"[^\s'ضصثقفغعهخحجشسيبلاتنمكطذءؤرىةوزظدئإأآڨڭپڢڤچ]+")
elif language == "de":
WORD_REGEX = re.compile(r"[^qwertzuiopüasdfghjklöäyxcvbnmßQWERTZUIOPÜASDFGHJKLÖÄYXCVBNMẞ']")
elif language == "da":
WORD_REGEX = re.compile(r"[^qwertyuiopåasdfghjkløæzxcvbnmQWERTYUIOPÅASDFGHJKLØÆZXCVBNM']")
elif language == "nb":
WORD_REGEX = re.compile(r"[^qwertyuiopåasdfghjkløæzxcvbnmQWERTYUIOPÅASDFGHJKLØÆZXCVBNM']")
elif language == "cs":
WORD_REGEX = re.compile(
r"[^aábcčdďeéěfghchiíjklmnňoópqrřsštťuúůvwxyýzžAÁBCČDĎEÉĚFGHChIÍJKLMNŇOÓPQRŘSŠTŤUÚŮVWXYÝZŽ']")
elif language == "ur":
WORD_REGEX = re.compile(r"[^ےیءھہونملگکقفغعظطضصشسژڑرذڈدخحچجثٹتپباآ']")
elif language == "ko":
WORD_REGEX = re.compile(r"[^ㅂㄷㅈㄱㅃㄸㅉㄲㅍㅌㅊㅋㅅㅎㅆㅁㄴㅇㄹㅣㅔㅚㅐㅏㅗㅜㅓㅡㅢㅖㅒㅑㅛㅠㅕㅟㅞㅙㅘㅝ']")
elif language == "fr":
WORD_REGEX = re.compile(r"[^éèêëcçàâæazertyÿuiîïoôœpqsdfghjklmùûüwxcvbnAÀÆZEÉÈÊËCÇRTYŸUÛÜIÎÏOÔŒPQSDFGHJKLMWXCVBN']")
count_dict = {}
letterset = set()
emojiset = set()
file_path = sys.argv[2]
emoji_path = sys.argv[3]
with open(emoji_path, 'r', encoding='utf-8') as f_emoji:
for line in f_emoji:
emojis = line.strip().split('\t')
if emojis[0] not in emojiset:
emojiset.add(emojis[0])
with open(file_path, 'r', encoding='utf-8') as f_in:
for line in f_in:
words = regex.split(line.strip())
# 如果字典里有该单词则加1,否则添加入字典
for w in words:
if re.search(WORD_REGEX, w.strip()) is None:
# print(w)
if w.strip() in count_dict.keys():
count_dict[w] = count_dict[w] + 1
else:
count_dict[w] = 1
# 按照词频从高到低排列
count_list = sorted(count_dict.items(), key=lambda x: int(x[1]), reverse=True)
with open(file_path.replace('.txt', '.wordcount'), 'w', encoding='utf-8') as f_word:
for l in count_list:
s1 = str(l[0]) + "\t" + str(l[1])
alist = [ch for ch in str(l[0]).lower()]
for letter in alist:
if re.search(WORD_REGEX, letter) is None:
if letter not in letterset:
print(letter)
letterset.add(letter)
f_word.write(s1)
f_word.write('\n')
with open(file_path.replace('.txt', '.letter'), 'w', encoding='utf-8') as f_letter:
for lt in sorted(letterset):
f_letter.write(lt.strip())
f_letter.write('\n')
f_letter.close()
f_word.close()
f_in.close()
print("Finish line")
| [
"zhongfang.zhang@kikatech.com"
] | zhongfang.zhang@kikatech.com |
6c1b28c833f4fedb25620875cd937e29e71c6300 | c268dcf432f3b7171be6eb307aafbe1bd173285a | /reddit2telegram/channels/~inactive/brandnewsentence/app.py | c4e82d15fe37149058d98dd23d6c7438cc2c9b7f | [
"MIT"
] | permissive | Fillll/reddit2telegram | a7162da2cc08c81bcc8078ea4160d4ee07461fee | 5d8ee3097e716734d55a72f5a16ce3d7467e2ed7 | refs/heads/master | 2023-08-09T10:34:16.163262 | 2023-07-30T18:36:19 | 2023-07-30T18:36:19 | 67,726,018 | 258 | 205 | MIT | 2023-09-07T02:36:36 | 2016-09-08T17:39:46 | Python | UTF-8 | Python | false | false | 153 | py | #encoding:utf-8
subreddit = 'BrandNewSentence'
t_channel = '@BrandNewSentence'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| [
"git@fillll.ru"
] | git@fillll.ru |
3b75c223218874c78c26f890a9b810b03cb8d77d | 8a780cb47eac9da046bdb5d6917f97a086887603 | /problems/perfect_squares/solution.py | 54396be213034cac0f5f72f5dcfeedf62be4a092 | [] | no_license | dengl11/Leetcode | d16315bc98842922569a5526d71b7fd0609ee9fb | 43a5e436b6ec8950c6952554329ae0314430afea | refs/heads/master | 2022-12-20T03:15:30.993739 | 2020-09-05T01:04:08 | 2020-09-05T01:04:08 | 279,178,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | class Solution:
def numSquares(self, n: int) -> int:
if n < 2: return n
squares = set()
i = 1
while i ** 2 <= n:
squares.add(i ** 2)
i += 1
q = [n]
cnt = 1
while q:
nq = []
for x in q:
if x in squares: return cnt
for y in squares:
if x > y:
nq.append(x - y)
cnt += 1
q = nq | [
"ldeng1314@gmail.com"
] | ldeng1314@gmail.com |
68a67f7d0bd54b98e1587ff16a40b64c5d0fee21 | 15f0514701a78e12750f68ba09d68095172493ee | /Python3/729.py | fd01f0fd395d33cbda83d3059ecd058733a1166a | [
"MIT"
] | permissive | strengthen/LeetCode | 5e38c8c9d3e8f27109b9124ae17ef8a4139a1518 | 3ffa6dcbeb787a6128641402081a4ff70093bb61 | refs/heads/master | 2022-12-04T21:35:17.872212 | 2022-11-30T06:23:24 | 2022-11-30T06:23:24 | 155,958,163 | 936 | 365 | MIT | 2021-11-15T04:02:45 | 2018-11-03T06:47:38 | null | UTF-8 | Python | false | false | 1,774 | py | __________________________________________________________________________________________________
sample 220 ms submission
from bisect import bisect_right
class MyCalendar:
def __init__(self):
self.intervals = [[-2, -1], [10 ** 9 + 1, 10 ** 9 + 2]]
def book(self, start: int, end: int) -> bool:
curr = [start, 10 ** 9 + 1]
index = bisect_right(self.intervals, curr)
if(self.intervals[index - 1][1] > start or
self.intervals[index][0] < end):
return False
self.intervals.insert(index, [start, end])
return True
# Your MyCalendar object will be instantiated and called as such:
# obj = MyCalendar()
# param_1 = obj.book(start,end)
__________________________________________________________________________________________________
sample 13400 kb submission
class MyCalendar:
def __init__(self):
self.intervals = []
def book(self, start: int, end: int) -> bool:
if not self.intervals or end <= self.intervals[0][0]:
self.intervals = [[start, end]] + self.intervals
return True
if self.intervals[-1][1] <= start:
self.intervals.append([start, end])
return True
for ind in range(1, len(self.intervals)):
i = self.intervals[ind]
prev = self.intervals[ind - 1]
if start >= prev[1] and end <= i[0]:
self.intervals.insert(ind, [start, end])
return True
return False
# Your MyCalendar object will be instantiated and called as such:
# obj = MyCalendar()
# param_1 = obj.book(start,end)
__________________________________________________________________________________________________
| [
"strengthen@users.noreply.github.com"
] | strengthen@users.noreply.github.com |
29cf44adb474a34dd6fdefc5ffcf386ae5cc2391 | c5698844e4c5cd6428d25f5a97a2f4ad069df251 | /datacamp/Introduction to Data Visualization with Python/Working with 2D arrays/linspace meshgrid ejercicio3.py | e6922f5e3d910786ed16ac3ad722e44b6180a100 | [] | no_license | jrartd/Python-tools | 1ade026dcc9b3987bb7a6af130403895a8456d3c | 361031a2d108e048d267bf386a8a703359a81321 | refs/heads/master | 2022-12-21T23:38:53.038535 | 2018-02-09T18:18:10 | 2018-02-09T18:18:10 | 114,409,529 | 0 | 1 | null | 2022-12-12T09:18:07 | 2017-12-15T20:41:15 | HTML | UTF-8 | Python | false | false | 490 | py | import numpy as np
import matplotlib.pyplot as plt
listasnos = []
def ejex(anonp,value):
while (anonp < value):
listasnos.append(anonp)
anonp = anonp + 1
ejex(1900,2000)
indicey = []
def ejey(indice, value):
while (indice < value):
indicey.append(indice)
indice = indice + 1
ejey(0,1000)
#numpy
ejex = np.array(listasnos)
ejey = np.array(indicey)
x,y = np.meshgrid(ejex, ejey)
z = x**2 + y**4
plt.set_cmap("nipy_spectral")
plt.contour(z)
plt.colorbar()
plt.show()
| [
"you@example.com"
] | you@example.com |
c16d0ada511f68c61f1888c465bb3f79358a96f8 | f338eb32c45d8d5d002a84798a7df7bb0403b3c4 | /FastSimulation/EgammaElectronAlgos/python/pixelMatchElectronL1IsoLargeWindowSequenceForHLT_cff.py | b70124609eed63b9a61312b2166558ab82253616 | [] | permissive | wouf/cmssw | 0a8a8016e6bebc611f1277379e12bef130464afb | 60da16aec83a0fc016cca9e2a5ed0768ba3b161c | refs/heads/CMSSW_7_3_X | 2022-06-30T04:35:45.380754 | 2015-05-08T17:40:17 | 2015-05-08T17:40:17 | 463,028,972 | 0 | 0 | Apache-2.0 | 2022-02-24T06:05:30 | 2022-02-24T06:05:26 | null | UTF-8 | Python | false | false | 3,657 | py | import FWCore.ParameterSet.Config as cms
#
# create a sequence with all required modules and sources needed to make
# pixel based electrons
#
# NB: it assumes that ECAL clusters (hybrid) are in the event
#
#
# modules to make seeds, tracks and electrons
# include "RecoEgamma/EgammaHLTProducers/data/egammaHLTChi2MeasurementEstimatorESProducer.cff"
# Cluster-seeded pixel pairs
#import FastSimulation.EgammaElectronAlgos.fastElectronSeeds_cfi
# (Not-so) Regional Tracking
from FastSimulation.Tracking.GlobalPixelTracking_cff import *
#####put the hack! choose 1E31 that is more inclusive!
#from FastSimulation.Configuration.blockHLT_1E31_cff import *
#####
##new path in XXXX_49 for 1E31 only
#hltL1IsoLargeElectronPixelSeeds = FastSimulation.EgammaElectronAlgos.fastElectronSeeds_cfi.fastElectronSeeds.clone()
#hltL1IsoLargeElectronPixelSeeds.SeedConfiguration = cms.PSet(
# block_hltL1IsoLargeElectronPixelSeeds
#)
#hltL1IsoLargeElectronPixelSeeds.barrelSuperClusters = 'hltCorrectedHybridSuperClustersL1Isolated'
#hltL1IsoLargeElectronPixelSeeds.endcapSuperClusters = 'hltCorrectedMulti5x5EndcapSuperClustersWithPreshowerL1Isolated'
#
#
#
#hltL1IsoLargeWindowElectronPixelSeeds = FastSimulation.EgammaElectronAlgos.fastElectronSeeds_cfi.fastElectronSeeds.clone()
#hltL1IsoLargeWindowElectronPixelSeeds.SeedConfiguration = cms.PSet(
# block_hltL1IsoLargeWindowElectronPixelSeeds
#)
#hltL1IsoLargeWindowElectronPixelSeeds.barrelSuperClusters = 'hltCorrectedHybridSuperClustersL1Isolated'
#hltL1IsoLargeWindowElectronPixelSeeds.endcapSuperClusters = 'hltCorrectedMulti5x5EndcapSuperClustersWithPreshowerL1Isolated'
# Track candidates
import FastSimulation.Tracking.TrackCandidateProducer_cfi
# Build track candidates from the large-window electron pixel seeds; the
# already-reconstructed L1-isolated CTF tracks are given as input producers.
hltCkfL1IsoLargeWindowTrackCandidates = FastSimulation.Tracking.TrackCandidateProducer_cfi.trackCandidateProducer.clone()
hltCkfL1IsoLargeWindowTrackCandidates.SeedProducer = cms.InputTag("hltL1IsoLargeWindowElectronPixelSeeds")
hltCkfL1IsoLargeWindowTrackCandidates.TrackProducers = cms.VInputTag(cms.InputTag("hltCtfL1IsoWithMaterialTracks"))
# 999 effectively disables the crossed-layer cut.
hltCkfL1IsoLargeWindowTrackCandidates.MaxNumberOfCrossedLayers = 999
hltCkfL1IsoLargeWindowTrackCandidates.SeedCleaning = True
hltCkfL1IsoLargeWindowTrackCandidates.SplitHits = False
# CTF track fit with material
import RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi
ctfL1IsoLargeWindowTracks = RecoTracker.TrackProducer.CTFFinalFitWithMaterial_cfi.ctfWithMaterialTracks.clone()
ctfL1IsoLargeWindowTracks.src = 'hltCkfL1IsoLargeWindowTrackCandidates'
# Reuse existing hits without refitting them; fitter/smoother tuned for electrons.
ctfL1IsoLargeWindowTracks.TTRHBuilder = 'WithoutRefit'
ctfL1IsoLargeWindowTracks.Fitter = 'KFFittingSmootherForElectrons'
ctfL1IsoLargeWindowTracks.Propagator = 'PropagatorWithMaterial'
# Merge: combine the newly fitted large-window tracks with the standard
# L1-isolated tracks into a single collection (tracks only, no extras).
hltCtfL1IsoLargeWindowWithMaterialTracks = cms.EDProducer("FastTrackMerger",
                                                          SaveTracksOnly = cms.untracked.bool(True),
                                                          TrackProducers = cms.VInputTag(cms.InputTag("ctfL1IsoLargeWindowTracks"),
                                                                                         cms.InputTag("hltCtfL1IsoWithMaterialTracks"))
                                                          )
# Sequences exported to the HLT menu; SequencePlaceholder entries are
# resolved when the full menu is assembled.
HLTPixelMatchLargeWindowElectronL1IsoTrackingSequence = cms.Sequence(hltCkfL1IsoLargeWindowTrackCandidates+
                                                                     ctfL1IsoLargeWindowTracks+
                                                                     hltCtfL1IsoLargeWindowWithMaterialTracks+
                                                                     cms.SequencePlaceholder("hltPixelMatchLargeWindowElectronsL1Iso"))
hltL1IsoLargeWindowElectronPixelSeedsSequence = cms.Sequence(globalPixelTracking+
                                                             cms.SequencePlaceholder("hltL1IsoLargeWindowElectronPixelSeeds"))
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
26996939dcaba7d0c128ad4403b63fc782b868a5 | 4c8fac087fd39aeadd5af6ae716e45ef780871c1 | /day_05/day_05_b.py | 41dbd747d351d72cd95bcd8020bed5ced1be47fa | [] | no_license | HenriBranken/Advent_of_Code_2017_python_3 | 412753feecf732a7c66656ebbbb3fbad3cb950d8 | 62e19d3af0b3f1f0bfb504f84fa0b12cb497feda | refs/heads/master | 2023-02-24T14:28:34.087881 | 2023-02-18T13:46:14 | 2023-02-18T13:46:14 | 118,493,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | # .-------------------------------------------------------------------------------------.
# | Puzzle Author: Eric Wastl; http://was.tl/ |
# | Python 3 Solution Author: Henri Branken |
# | GitHub Repository: https://github.com/HenriBranken/Advent_of_Code_2017_python_3 |
# | My email address: henri dot branken777 at gmail dot com |
# | Date: December 2017 |
# | Event: Advent of Code 2017 |
# | Day 5: A Maze of Twisty Trampolines, All Alike, Part 2 |
# | Website: https://adventofcode.com/2017/day/5 |
# | |
# | This does not necessarily represent the most optimized solution to the problem, but |
# | simply the solution that I came up with at the time using Python 3. |
# '-------------------------------------------------------------------------------------'
# Parse the puzzle input: one signed integer jump offset per line.  A
# blank-ish line marks the end of the useful input.
jump_offsets = []
with open("day_05_input.txt") as puzzle_input:  # Your puzzle input
    for raw_line in puzzle_input:
        if raw_line in ["", " ", "\n"]:
            break
        jump_offsets.append(int(raw_line.rstrip()))

last_valid_index = len(jump_offsets) - 1
steps_taken = 0
position = 0
# Walk the offset list: after each jump the offset just used is
# incremented, unless it was three or more, in which case it is
# decremented.  Every jump is counted; the walk ends as soon as the
# position falls outside the list on either side.
while True:
    offset = jump_offsets[position]
    jump_offsets[position] = offset - 1 if offset >= 3 else offset + 1
    position += offset
    steps_taken += 1
    if (position > last_valid_index) or (position < 0):
        break
print(str(steps_taken) + " steps to reach the exit.")
# Expected answer for my input: 28372145.
| [
"henri.branken777@gmail.com"
] | henri.branken777@gmail.com |
a193bb3f66cde2dc14f960823f3ee64b1b74f5e5 | eef9afdc9ab5ee6ebbfa23c3162b978f137951a8 | /decorators_py/Methods_of_Decorators/static_method.py | d8d52c226e6edcb5364ad282285f4ecc9fc73d7f | [] | no_license | krishnadhara/programs-venky | 2b679dca2a6158ecd6a5c28e86c8ed0c3bab43d4 | 0f4fd0972dec47e273b6677bbd32e2d0742789ae | refs/heads/master | 2020-06-05T04:48:45.792427 | 2019-06-17T09:59:45 | 2019-06-17T09:59:45 | 192,318,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | class Decorator:
def __init__(self):
print("INIT based method")
@staticmethod
def Example_method():
print("This is static method")
print("End the static method")
de = Decorator()
de.Example_method()
| [
"49470138+krishnadhara@users.noreply.github.com"
] | 49470138+krishnadhara@users.noreply.github.com |
ae737febd20b2b4026a97a9abb5c4929b07c3708 | 433cef9d401e1b349c6d4c96fb07fe39b875a620 | /convnets.py | d4e0c215befb783d8593fc22c5dbd42932dbc65d | [] | no_license | hchen13/caps | 32cc9cdc1a476d8e7cd0f2d7c5041e2930665c87 | 4d049c9dd610ba686957c821c7eafc40ec3d0adb | refs/heads/master | 2020-03-19T07:33:41.229606 | 2018-11-15T06:18:40 | 2018-11-15T06:18:40 | 136,124,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,345 | py | import numpy as np
from keras import Model, Input, optimizers
from keras import backend as K
from keras.layers import Dense, LSTM, Dropout, Conv1D, MaxPooling1D, Flatten, Concatenate, AveragePooling1D, \
BatchNormalization, regularizers
def elliptic_paraboloid_weight(x, y, diff_weight, same_weight):
    """Return a coefficient weighting (x, y) by directional agreement.

    The point is rotated by -45 degrees so one principal axis lies along
    the ``x == y`` diagonal (same direction) and the other along the
    ``x == -y`` diagonal (different directions).  The squared component
    along the disagreement axis is divided by ``diff_weight`` and the
    one along the agreement axis by ``same_weight``, producing a large
    value for sign disagreement and a small one otherwise (for suitable
    weights).  ``x`` and ``y`` may be scalars or numpy arrays of the
    same shape.
    """
    angle = -np.pi / 4  # rotation that aligns the diagonals with the axes
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rotated_x = x * cos_a + y * sin_a
    rotated_y = y * cos_a - x * sin_a
    return rotated_x ** 2 / diff_weight + rotated_y ** 2 / same_weight
def directional_loss(y, y_hat):
    """Mean squared error, scaled up when prediction and target disagree in sign.

    ``sigmoid(4 * y * y_hat)`` approaches 1 for strongly agreeing signs
    and 0 for strongly disagreeing ones, so the multiplier below ranges
    from about 1 (agreement) to about 2 (disagreement).
    """
    mse = K.mean(K.square(y - y_hat))
    agreement = K.sigmoid(y * y_hat * 4)
    return mse * (2 - agreement)
def directional_accuracy(y, y_hat):
    """Fraction of samples whose prediction has the same sign as the target."""
    same_sign = y * y_hat > 0
    return K.mean(same_sign)
def precision(y, y_hat):
    """Precision metric: true positives over predicted positives.

    Both inputs are treated as positive-class scores, clipped to [0, 1]
    and rounded to hard 0/1 decisions; epsilon avoids division by zero.
    """
    true_pos = K.sum(K.round(K.clip(y * y_hat, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_hat, 0, 1)))
    return true_pos / (predicted_pos + K.epsilon())
def recall(y, y_hat):
    """Recall metric: true positives over actual positives.

    Inputs are clipped to [0, 1] and rounded to hard 0/1 decisions;
    epsilon avoids division by zero when there are no positives.
    """
    true_pos = K.sum(K.round(K.clip(y * y_hat, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())
def f1_score(y, y_hat):
    """Harmonic mean of the precision and recall metrics."""
    prec = precision(y, y_hat)
    rec = recall(y, y_hat)
    # Harmonic mean expressed as the reciprocal of the mean reciprocal,
    # equivalent to the usual 2 / (1/p + 1/r).
    reciprocal_mean = (1 / prec + 1 / rec) / 2
    return 1 / reciprocal_mean
def conv(filters, kernel_size, lambd=0.001):
    """Shorthand for a same-padded ReLU Conv1D with He init and L2 penalty."""
    return Conv1D(
        filters,
        kernel_size,
        padding='same',
        activation='relu',
        kernel_initializer='he_normal',
        kernel_regularizer=regularizers.l2(lambd),
    )
def direction_inception_model(input_shape, lambd):
    """Build and compile a 1-D Inception-style binary direction classifier.

    Args:
        input_shape: full batch shape; only input_shape[1:] is used for
            the Input layer (the batch dimension is dropped).
        lambd: L2 regularization strength passed to every conv block.

    Returns:
        A compiled keras Model with a single sigmoid output named
        'direction', optimized with Adam on binary cross-entropy with
        accuracy/precision/recall/F1 metrics.
    """
    def inception(layer, out_channels):
        # One inception block: parallel conv paths with kernel sizes
        # 1, 3 and 6 plus a stride-1 max-pool path, concatenated along
        # the channel axis.  Channel budget out of `out_channels`:
        # 1/2 to the 1x1 path, 1/4 to the 3-path, 1/8 each to the
        # 6-path and the pooling path.
        num_channels = int(out_channels / 2)
        # equivalence to the 1x1 convolution
        conv1 = conv(num_channels, 1, lambd)(layer)
        conv3 = conv(32, 1, lambd)(layer)
        conv3 = conv(int(num_channels / 2), 3, lambd)(conv3)
        conv6 = conv(16, 1, lambd)(layer)
        conv6 = conv(int(num_channels / 4), 6, lambd)(conv6)
        pool = MaxPooling1D(strides=1, padding='same')(layer)
        pool = conv(int(num_channels / 4), 1, lambd)(pool)
        incep = Concatenate(axis=2)([conv1, conv3, conv6, pool])
        return incep
    inputs = Input(shape=input_shape[1:])
    # Stem: plain conv stages with pooling and batch norm.
    f = conv(16, 6, lambd)(inputs)
    f = conv(16, 6, lambd)(f)
    p = MaxPooling1D()(f)
    p = BatchNormalization()(p)
    f = conv(32, 1, lambd)(p)
    f = conv(32, 6, lambd)(f)
    p = MaxPooling1D()(f)
    p = BatchNormalization()(p)
    # Alternating inception blocks and plain conv/pool stages, widening
    # the channel count as the sequence is downsampled.
    p = inception(p, 128)
    p = BatchNormalization()(p)
    f = conv(128, 1, lambd)(p)
    f = conv(128, 3, lambd)(f)
    p = MaxPooling1D()(f)
    p = BatchNormalization()(p)
    p = inception(p, 256)
    p = BatchNormalization()(p)
    f = conv(256, 1, lambd)(p)
    f = conv(256, 3, lambd)(f)
    p = MaxPooling1D()(f)
    f = conv(512, 1, lambd)(p)
    f = conv(1024, 3, lambd)(f)
    p = MaxPooling1D()(f)
    p = BatchNormalization()(p)
    p = inception(p, 1024)
    p = BatchNormalization()(p)
    # Classifier head on the flattened feature map.
    feature_vec = Flatten(name='bottleneck')(p)
    dense = Dense(800, activation='relu')(feature_vec)
    dense = Dropout(.2)(dense)
    d = Dense(1, activation='sigmoid', name='direction')(dense)
    model = Model(inputs=inputs, outputs=d)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', precision, recall, f1_score])
    model.summary()
    return model
def direction_vgg_model(input_shape, keep_prob=.2):
    """Build and compile a VGG-style 1-D conv binary direction classifier.

    Args:
        input_shape: full batch shape; only input_shape[1:] is used for
            the Input layer (the batch dimension is dropped).
        keep_prob: rate passed to every Dropout layer.  NOTE(review):
            despite the name, keras Dropout interprets this as the
            fraction to *drop*, not to keep -- confirm before tuning.

    Returns:
        A compiled keras Model with a sigmoid output named 'direction',
        optimized with Adam on a precision-weighted binary cross-entropy.
    """
    inputs = Input(shape=input_shape[1:])
    def conv(filters, kernel_size, lambd=0.001):
        # Local same-padded ReLU conv helper (shadows the module-level one).
        return Conv1D(filters, kernel_size, padding='same', activation='relu', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(lambd))
    # VGG-style stacks: groups of convs followed by pooling and dropout,
    # doubling the channel count each group.
    f = conv(16, 6)(inputs)
    f = conv(16, 6)(f)
    p = MaxPooling1D()(f)
    p = Dropout(keep_prob)(p)
    f = conv(32, 1)(p)
    f = conv(32, 6)(f)
    f = conv(32, 6)(f)
    p = MaxPooling1D()(f)
    p = Dropout(keep_prob)(p)
    f = conv(64, 1)(p)
    f = conv(64, 3)(f)
    f = conv(64, 3)(f)
    p = MaxPooling1D()(f)
    p = Dropout(keep_prob)(p)
    f = conv(128, 1)(p)
    f = conv(128, 3)(f)
    f = conv(128, 3)(f)
    p = MaxPooling1D()(f)
    p = Dropout(keep_prob)(p)
    f = conv(256, 1)(p)
    f = conv(256, 3)(f)
    f = conv(256, 3)(f)
    p = MaxPooling1D()(f)
    p = Dropout(keep_prob)(p)
    f = conv(512, 1)(p)
    f = conv(512, 3)(f)
    f = Dropout(keep_prob)(f)
    f = conv(512, 3)(f)
    p = AveragePooling1D()(f)
    p = Dropout(keep_prob)(p)
    feature_vec = Flatten(name='bottleneck')(p)
    d = Dense(
        1, activation='sigmoid', name='direction',
        kernel_regularizer=regularizers.l2(.003)
    )(feature_vec)
    model = Model(inputs=inputs, outputs=d)
    # Removed dead code: an unused SGD() optimizer instance was created
    # here but never passed to compile() -- Adam is used below.
    def biased_loss(y, y_hat):
        # Scale the loss up when precision is low, biasing training
        # toward fewer false positives.
        from keras.losses import binary_crossentropy
        return (2 - precision(y, y_hat)) * binary_crossentropy(y, y_hat)
    model.compile(optimizer='adam', loss=biased_loss, metrics=['acc', precision, recall, f1_score])
    model.summary()
    return model
def direction_lstm2(input_shape, lambd=0.0001):
    """Build and compile a two-layer LSTM binary direction classifier.

    Args:
        input_shape: full batch shape; only input_shape[1:] is used for
            the Input layer (the batch dimension is dropped).
        lambd: L2 strength applied to the kernel, recurrent and activity
            regularizers of every LSTM layer and to the output Dense.

    Returns:
        A compiled keras Model with a single sigmoid output, optimized
        with Adam on binary cross-entropy.
    """
    inputs = Input(shape=input_shape[1:])
    def lstm(units, return_sequences=False):
        # LSTM factory with uniform L2 regularization on all three terms.
        return LSTM(
            units, return_sequences=return_sequences,
            kernel_regularizer=regularizers.l2(lambd),
            recurrent_regularizer=regularizers.l2(lambd),
            activity_regularizer=regularizers.l2(lambd)
        )
    # First LSTM returns the full sequence so the second can consume it.
    f = lstm(32, return_sequences=True)(inputs)
    f = Dropout(.2)(f)
    f = lstm(64)(f)
    # f = Dense(256, kernel_regularizer=regularizers.l2(lambd), activation='relu')(f)
    d = Dense(1, activation='sigmoid', kernel_regularizer=regularizers.l2(lambd))(f)
    model = Model(inputs=inputs, outputs=d)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', precision, recall])
    model.summary()
    return model
| [
"gradschool.hchen13@gmail.com"
] | gradschool.hchen13@gmail.com |
fb93808b547473d3f3112186626674fc06c543a6 | a8d3471d8d2fa914a0f8bab0495b115e70538430 | /deploy/policy.py | 4868fe2ecc2939c424f8394e6cf9ca4ec1200965 | [] | no_license | davidbegin/morguebot | a03bbfa46cbfd6b916ace98a2f476e44d4843349 | f62d75917d13b4a443c0f3c8e00f7dd82720d9f3 | refs/heads/master | 2022-12-22T14:52:20.398139 | 2020-01-17T15:56:04 | 2020-01-17T15:56:04 | 213,519,437 | 8 | 0 | null | 2022-12-08T06:49:48 | 2019-10-08T01:16:15 | Python | UTF-8 | Python | false | false | 1,587 | py | import difflib
import json
{
"Version": "2012-10-17",
"Id": "MorgueFileBucketPolicy",
"Statement": [
{
"Sid": "Allow",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::851075464416:role/morgue-bot-lambda-role-782df96",
"arn:aws:iam::851075464416:role/morgue-stalker-role-39eb325",
]
},
"Action": "s3:*",
"Resource": "arn:aws:s3:::morgue-files-2944dfb/*",
}
],
}
{
"Version": "2012-10-17",
"Id": "MorgueFileBucketPolicy",
"Statement": [
{
"Sid": "AllowThingsInTheBucket",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::851075464416:role/morgue-bot-lambda-role-782df96",
"arn:aws:iam::851075464416:role/morgue-stalker-role-39eb325",
]
},
"Action": "s3:*",
"Resource": "arn:aws:s3:::morgue-files-2944dfb/*",
},
{
"Sid": "AllowTheBucket",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::851075464416:role/morgue-bot-lambda-role-782df96",
"arn:aws:iam::851075464416:role/morgue-stalker-role-39eb325",
]
},
"Action": "s3:*",
"Resource": "arn:aws:s3:::morgue-files-2944dfb",
},
],
}
d = difflib.Differ()
result = list(d.compare(x, y))
print(result)
| [
"davidmichaelbe@gmail.com"
] | davidmichaelbe@gmail.com |
18358f38d87a5aa21e563d63e50ecbfff6573b09 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_4/112.py | e839d9df0ca60a94910c1bfca26c9f226cc56b22 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | #!/usr/bin/env python2.5
"""
Google Code Jam 2008
Solution for the "Minimum Scalar Product" problem.
rbp@isnomore.net
Usage: scalar.py input_file output_file
"""
import sys
def read_input():
    """Parse the contest input file named in sys.argv[1].

    The file starts with the number of cases; each case is three lines:
    the vector size, vector 1, and vector 2.  Returns a list of dicts
    with keys 'v1', 'v2' (integer lists) and 'size'.  (Python-2-era
    script; ``open`` replaces the old ``file`` builtin.)
    """
    with open(sys.argv[1]) as handle:
        lines = handle.readlines()
    case_count = int(lines.pop(0))
    cases = []
    for _ in range(case_count):
        size = int(lines.pop(0))
        v1 = [int(tok) for tok in lines.pop(0).split()]
        v2 = [int(tok) for tok in lines.pop(0).split()]
        cases.append({'v1': v1, 'v2': v2, 'size': size})
    return cases
def scalar(v1, v2):
    """Dot product of two equal-length integer vectors."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total
def solve_case(c):
    """Minimum scalar product of the case's two vectors.

    Sorting one vector ascending and the other descending pairs the
    largest entries with the smallest, which minimises the dot product.
    """
    ascending = sorted(c['v1'])
    descending = sorted(c['v2'], reverse=True)
    return sum(a * b for a, b in zip(ascending, descending))
# Script entry point (Python 2: note the ``print output`` statement below).
# Prints to stdout when no output path is given, otherwise writes the file.
if __name__ == '__main__':
    cases = read_input()
    #print cases
    output = "\n".join(['Case #%d: %d' % (i+1, solve_case(c))
                        for i, c in enumerate(cases)])
    if len(sys.argv) < 3:
        print output
    else:
        out_file = file(sys.argv[2], "w")
        out_file.write(output)
        out_file.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9d2ff08012455b3d0049aaa3a36b5c25c33d0e80 | 1316cd6763e784811c769c1de577235c921af0de | /Apps/testAPP/mainApp.py | 3cb7c9d4b1d4f31d691d93f7e159252bd757442b | [] | no_license | VELA-CLARA-software/Software | a6fb6b848584e5893fd6939a447d23134ce636cc | 2e2a88ac0b2b03a495c868d2e11e6481e05097c3 | refs/heads/master | 2023-02-05T07:40:58.260798 | 2023-01-27T09:39:09 | 2023-01-27T09:39:09 | 69,860,536 | 7 | 3 | null | 2021-04-07T14:17:07 | 2016-10-03T10:20:46 | Mathematica | UTF-8 | Python | false | false | 748 | py | import sys,os
sys.path.append(str(os.path.dirname(os.path.abspath(__file__)))+'\\model')
sys.path.append(str(os.path.dirname(os.path.abspath(__file__)))+'\\controller')
sys.path.append(str(os.path.dirname(os.path.abspath(__file__)))+'\\view')
from PyQt4 import QtGui, QtCore
import model
import controller
import view
class App(QtGui.QApplication):
    """Qt application bootstrap wiring the MVC triad together.

    Builds the generated Ui_MainWindow view inside a QMainWindow, a
    Model, and a Controller connecting the two, then shows the window.
    (Python 2 / PyQt4 code: note the ``print`` statement.)
    """
    def __init__(self, sys_argv):
        super(App, self).__init__(sys_argv)
        print'Well this is fun'
        # View: generated Qt Designer class installed onto a main window.
        self.view= view.Ui_MainWindow()
        self.MainWindow = QtGui.QMainWindow()
        self.view.setupUi(self.MainWindow)
        self.model = model.Model()
        # Controller receives both view and model to wire up signals.
        self.controller = controller.Controller(self.view,self.model)
        self.MainWindow.show()
# Run the Qt event loop until the application exits.
if __name__ == '__main__':
    app = App(sys.argv)
    sys.exit(app.exec_())
| [
"tim.price@stfc.ac.uk"
] | tim.price@stfc.ac.uk |
fa17c8d42f17ffd01cff18f6653e2e19ef68d965 | d99ac626d62c663704444a9cce7e7fc793a9e75e | /crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/common/_c_bridge/_buffer.py | 5e9c70b128467ebc4aba31b85b9a1be58230ee68 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Experiment5X/CryptoFunctionDetection | 3ab32d5573a249d24db1faf772721bc80b8d905d | dac700193e7e84963943593e36844b173211a8a1 | refs/heads/master | 2023-04-19T09:12:35.828268 | 2021-05-13T22:39:27 | 2021-05-13T22:39:27 | 355,299,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,892 | py | # Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from ctypes import *
from ._vsc_buffer import VscBuffer
class Buffer(object):
    """Python wrapper around a native ``vsc_buffer_t`` byte buffer.

    Owns a ctypes byte array of fixed capacity that the underlying C
    library writes into through the ``c_buffer`` handle.
    """
    def __init__(self, capacity):
        # Allocate the native handle and point it at our ctypes storage.
        self._lib_vsc_buffer = VscBuffer()
        self._bytes_ = (c_byte * capacity)()
        self.c_buffer = self._lib_vsc_buffer.vsc_buffer_new()
        self._lib_vsc_buffer.vsc_buffer_use(
            self.c_buffer,
            self._bytes_,
            c_size_t(capacity)
        )

    def __len__(self):
        # Number of bytes actually written, not the allocated capacity.
        return self._lib_vsc_buffer.vsc_buffer_len(self.c_buffer)

    def __eq__(self, other):
        return self._lib_vsc_buffer.vsc_buffer_equal(self.c_buffer, other.c_buffer)

    def __bytes__(self):
        return self.get_bytes()

    def __del__(self):
        # Bug fix: this was previously ``__delete__(self, instance)``,
        # which is part of the descriptor protocol and never runs on an
        # ordinary instance, so the native buffer was never released.
        # ``__del__`` runs when the wrapper is garbage collected.  Guard
        # against a partially constructed instance (e.g. __init__ raised).
        buf = getattr(self, 'c_buffer', None)
        if buf is not None:
            self._lib_vsc_buffer.vsc_buffer_destroy(buf)

    @classmethod
    def take_c_ctx(cls, c_ctx):
        """Wrap an existing native buffer handle, taking ownership of it."""
        inst = cls.__new__(cls)
        inst._lib_vsc_buffer = VscBuffer()
        inst.c_buffer = c_ctx
        return inst

    @classmethod
    def use_c_ctx(cls, c_ctx):
        """Wrap an existing native buffer via a shallow (shared) copy."""
        inst = cls.__new__(cls)
        inst._lib_vsc_buffer = VscBuffer()
        inst.c_buffer = inst._lib_vsc_buffer.vsc_buffer_shallow_copy(c_ctx)
        return inst

    def get_bytes(self):
        """Return the written portion of the buffer as a bytearray."""
        return bytearray(self._bytes_)[:self._lib_vsc_buffer.vsc_buffer_len(self.c_buffer)]
| [
"xmeadamx@gmail.com"
] | xmeadamx@gmail.com |
56e6a43555bf6d6aa21d55c302cc0c0f669a66e7 | 81d2e3b6fe042e70cc2abb7f549f60ba44928fdf | /array/66.加一.py | be8dbff5eb4701bce34a5943e35ea712c0621c85 | [] | no_license | weizhixiaoyi/leetcode | a506faed3904342ed65234864df52071977d544d | 6114ebacc939f48a39a56d366646b0f28b4f6c1a | refs/heads/master | 2022-12-22T03:52:07.936800 | 2020-09-29T07:49:52 | 2020-09-29T07:49:52 | 202,662,720 | 5 | 2 | null | 2019-08-17T09:24:49 | 2019-08-16T05:16:08 | C++ | UTF-8 | Python | false | false | 675 | py | # -*- coding:utf-8 -*-
from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
if digits[0] == 0: return [1]
digits_len = len(digits)
i = digits_len - 1
digits[i] += 1
carry = 0
while i >= 0:
digits[i] = digits[i] + carry
if digits[i] >= 10:
carry = 1
digits[i] = digits[i] % 10
else:
carry = 0
i -= 1
if carry:
digits = [carry] + digits
return digits
# Quick manual check: 8999 + 1 -> [9, 0, 0, 0].
if __name__ == '__main__':
    digits = [8, 9, 9, 9]
    ans = Solution().plusOne(digits)
    print(ans)
| [
"zhenhai.gl@gmail.com"
] | zhenhai.gl@gmail.com |
3a7331ba88188bcdb9784765a2aa844d82e91c41 | 3e0468eb7101281ff8133b2acd08b6f83f8953f9 | /chap6/demolist2.py | 072af26b627e7be89b2bf23c0f0eac5e55f585cf | [] | no_license | etoy0328/python_base_knowledge | 4e514f93b844a1e5d2a654267cf5ea295ae634e2 | 7db140e838939da1ddf9967f82fc78d109aa6362 | refs/heads/master | 2023-03-22T10:23:28.211691 | 2021-03-16T10:32:42 | 2021-03-16T10:32:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | # 中国矿业大学(北京)/ 机电硕-6 / ZQT2000405103 / 李天鸽
# 编辑时间:2020/11/24 15:01
# Demo list mixing strings and integers for the indexing examples below.
lst = ['hello','world',56,'hello','world',89]
print(lst[2]) # forward (positive) indexing
print(lst[-4]) # reverse (negative) indexing
print(lst[10]) # deliberately out of bounds: raises IndexError ("list index out of range")
"1740636835@qq.com"
] | 1740636835@qq.com |
ce916f26f578fdb99b271247e9318c3a1ff43ee2 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/magic282_MXNMT/MXNMT-master/nmt/trainer.py | 69e74df257180b1d47c38ff9e717269eda66a341 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,878 | py | # -*- coding: utf-8 -*-
import mxnet as mx
import logging
import xconfig
from xsymbol import sym_gen
from xcallback import BatchCheckpoint, CheckBLEUBatch
from xutils import read_content, load_vocab, sentence2id
from xmetric import Perplexity
from masked_bucket_io import MaskedBucketSentenceIter
def get_LSTM_shape():
    """Build the initial-state name/shape pairs for encoder and decoder LSTMs.

    Returns ``(source_init_states, target_init_states)`` where each entry
    is ``(state_name, (batch_size, num_hidden))`` for every LSTM layer.
    The bidirectional encoder gets cell (c) and hidden (h) states for
    both directions, in the order forward-c, forward-h, backward-c,
    backward-h; the decoder here only gets its cell states.
    """
    state_shape = (xconfig.batch_size, xconfig.num_hidden)
    layer_range = range(xconfig.num_lstm_layer)

    source_init_states = []
    for direction in ('forward_source', 'backward_source'):
        for state_kind in ('c', 'h'):
            for layer in layer_range:
                name = '%s_l%d_init_%s' % (direction, layer, state_kind)
                source_init_states.append((name, state_shape))

    target_init_states = [('target_l%d_init_c' % layer, state_shape)
                          for layer in layer_range]
    return source_init_states, target_init_states
def train():
    """Train the NMT model end to end using settings from ``xconfig``.

    Loads the source/target vocabularies, builds the bucketed masked
    data iterator, optionally resumes from a saved checkpoint, then
    fits a FeedForward model over the seq2seq symbol with per-batch
    and per-epoch checkpoint callbacks.
    """
    # load vocabulary
    source_vocab = load_vocab(xconfig.source_vocab_path, xconfig.special_words)
    target_vocab = load_vocab(xconfig.target_vocab_path, xconfig.special_words)
    logging.info('source_vocab size: {0}'.format(len(source_vocab)))
    logging.info('target_vocab size: {0}'.format(len(target_vocab)))
    # get states shapes
    source_init_states, target_init_states = get_LSTM_shape()
    # build data iterator
    data_train = MaskedBucketSentenceIter(xconfig.train_source, xconfig.train_target, source_vocab, target_vocab,
                                          xconfig.buckets, xconfig.batch_size,
                                          source_init_states, target_init_states, seperate_char='\n',
                                          text2id=sentence2id, read_content=read_content,
                                          max_read_sample=xconfig.train_max_samples)
    # data_dev = MaskedBucketSentenceIter(xconfig.dev_source, xconfig.dev_source, source_vocab, target_vocab,
    #                                     xconfig.buckets, xconfig.batch_size,
    #                                     source_init_states, target_init_states, seperate_char='\n',
    #                                     text2id=sentence2id, read_content=read_content,
    #                                     max_read_sample=xconfig.dev_max_samples)
    # Train a LSTM network as simple as feedforward network
    optimizer = mx.optimizer.AdaDelta(clip_gradient=10.0)
    # optimizer = mx.optimizer.Adam(clip_gradient=10.0)
    _arg_params = None
    if xconfig.use_resuming:
        logging.info("Try resuming from {0} {1}".format(xconfig.resume_model_prefix, xconfig.resume_model_number))
        try:
            _, __arg_params, __ = mx.model.load_checkpoint(xconfig.resume_model_prefix, xconfig.resume_model_number)
            logging.info("Resume succeeded.")
            _arg_params = __arg_params
        # NOTE(review): bare except deliberately treats any load failure
        # as "start from scratch"; it also hides unrelated errors.
        except:
            logging.error('Resume failed.')
    model = mx.model.FeedForward(ctx=xconfig.train_device,
                                 symbol=sym_gen(len(source_vocab), len(target_vocab)),
                                 num_epoch=xconfig.num_epoch,
                                 optimizer=optimizer,
                                 initializer=mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=3),  # 35
                                 arg_params=_arg_params,
                                 )
    # Fit it
    model.fit(X=data_train,
              eval_metric=mx.metric.np(Perplexity),
              # eval_data=data_train,
              batch_end_callback=[mx.callback.Speedometer(xconfig.batch_size, xconfig.show_every_x_batch),
                                  BatchCheckpoint(save_name=xconfig.checkpoint_name,
                                                  per_x_batch=xconfig.checkpoint_freq_batch),
                                  # CheckBLEUBatch(start_epoch=xconfig.eval_start_epoch,
                                  #                per_batch=xconfig.eval_per_x_batch, use_beam=True,
                                  #                beam_size=xconfig.beam_size),
                                  ],
              epoch_end_callback=[mx.callback.do_checkpoint(xconfig.model_save_name, xconfig.model_save_freq),
                                  ])
| [
"659338505@qq.com"
] | 659338505@qq.com |
2ea793926c3f55a162d79188a1e2904df3494b9c | b8a13ecb7c0999954807e80c7470d8f752a3653b | /cryptopals-python3/challenge46.py | b0988b331a00237103847bce163a2fb9eff1d811 | [] | no_license | jbarcia/Python-Books | 59ca3d7b7fb1f2c1e3d1659f846032382af557a9 | 2106a2e5f56cdd4261bf870798a0a427d6137249 | refs/heads/master | 2021-01-19T00:24:59.727307 | 2017-01-05T00:07:13 | 2017-01-05T00:07:13 | 62,562,390 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import base64
import challenge39
# Module-level 1024-bit RSA key pair shared by the parity oracle below.
pub, priv = challenge39.genKey(1024)
def parityOracle(c):
    """Return the least significant bit of the decryption of ciphertext ``c``."""
    p = challenge39.decryptnum(priv, c)
    return p % 2
def deducePlaintext(ciphertext, pub, parityOracle):
    """Recover the plaintext of ``ciphertext`` via an RSA parity oracle.

    Repeatedly multiplies the ciphertext by the encryption of 2 (which
    doubles the underlying plaintext modulo n).  Each oracle answer --
    the parity of the doubled plaintext -- halves the candidate interval
    [n*low/denom, n*high/denom]; after n.bit_length() rounds the upper
    bound converges on the plaintext (Cryptopals challenge 46).
    """
    (e, n) = pub
    # The plaintext lies in [n * low / denom, n * high / denom].
    low = 0
    high = 1
    denom = 1
    c = challenge39.bytestonum(ciphertext)
    # k is the encryption of 2: multiplying c by k doubles the plaintext.
    k = pow(2, e, n)
    for _ in range(n.bit_length()):
        c = (c * k) % n
        p = parityOracle(c)
        d = high - low
        low *= 2
        high *= 2
        denom *= 2
        # Even parity: doubling did not wrap modulo the odd n, so the
        # value lies in the lower half; odd parity means the upper half.
        if p == 0:
            high -= d
        else:
            low += d
    hightext = challenge39.numtobytes(n * high // denom)
    print(hightext)
    return hightext
# Demo: encrypt a known plaintext, run the parity-oracle attack, and
# verify the recovered bytes match exactly.
if __name__ == '__main__':
    encodedPlaintext = b'VGhhdCdzIHdoeSBJIGZvdW5kIHlvdSBkb24ndCBwbGF5IGFyb3VuZCB3aXRoIHRoZSBGdW5reSBDb2xkIE1lZGluYQ=='
    plaintext = base64.b64decode(encodedPlaintext)
    ciphertext = challenge39.encryptbytes(pub, plaintext)
    plaintext2 = deducePlaintext(ciphertext, pub, parityOracle)
    if plaintext2 != plaintext:
        raise Exception(b'Invalid plaintext ' + plaintext2)
| [
"jbarcia99@yahoo.com"
] | jbarcia99@yahoo.com |
1fd4d7e6c5eb6c651c307409b7b9b9bb3785d664 | 21e6f90c01546d85b3a9aba5e06c639b994cbb60 | /lib/util/conversions.py | 27e092c6b692a2c624f136742cd976ac7d234f57 | [
"BSD-3-Clause",
"MIT"
] | permissive | OGalOz/map_tnseq | eb3987815745519f2d70e78a974e27e047237257 | ee7e6ea5187251267ae59ac4caef61fb766f13ca | refs/heads/master | 2022-05-14T23:34:38.020294 | 2022-03-02T18:41:48 | 2022-03-02T18:41:48 | 233,708,607 | 0 | 1 | NOASSERTION | 2021-10-05T17:29:59 | 2020-01-13T22:41:12 | Python | UTF-8 | Python | false | false | 7,134 | py | #python
"""
This file is used to convert genbank files into other formats.
For example, converting a genbank file into a gene table.
"""
import os
import sys
import logging
from datetime import datetime
import json
"""
Info:
Test Mode takes only the first 100 lines of the file and runs it
against a model.
Inputs: fastq_fp: (str) Fastq filepath
"""
def run_test_mode(fastq_fp):
    """Truncate the fastq at ``fastq_fp`` to at most its first 1000 lines.

    The kept lines (which still carry their own newlines) are joined
    with a newline -- matching the original behaviour, which therefore
    double-spaces the rewritten file -- logged for inspection, and then
    written back over the input file.

    Args:
        fastq_fp: (str) Fastq filepath
    """
    # Collect up to 1000 lines.  The old implementation called next() in
    # a bare try/except and left ``first_lines`` undefined whenever the
    # file had fewer than 1000 lines, crashing at the join below.
    first_lines = []
    with open(fastq_fp) as myfile:
        for line in myfile:
            first_lines.append(line)
            if len(first_lines) == 1000:
                break
    new_file_str = "\n".join(first_lines)
    logging.critical("TEST MODE FILE:")
    # Log at most the first 100 lines; the old loop assumed >= 100 lines.
    for line in first_lines[:100]:
        logging.critical(line)
    logging.critical(new_file_str[:1000])
    with open(fastq_fp, "w") as x:
        x.write(new_file_str)
def convert_from_poolfile_to_sequence_set_and_back(inp_fp_path,
        op_path, conversion_type, description="", run_id=None):
    """
    Convert a pool file (TSV) to a Sequence Set (JSON) or back.

    Args:
        inp_fp_path: Path to either pool file (TSV) or Sequence Set (JSON)
        op_path: Output path for the converted file
        conversion_type: (int) from [0,1]. 0 -> Pool file to Sequence Set
                        1 -> Sequence Set to Pool File.
        description: (string) Optional string describing the set
        run_id: (string) Optional id for the sequence set; defaults to a
                timestamped name.
    Poolfile columns:
        barcode(str):rcbarcode(str):nTot(int):n(int):scaffold(str):strand(str +/-):pos(int):
        n2(int):scaffold2(int):strand2(str +/-):pos2(int):nPastEnd(int)
    """
    if conversion_type not in [0, 1]:
        raise Exception("Cannot recognize conversion type: Must be int." + \
                "Val: {}".format(conversion_type))
    if conversion_type == 0:
        # Going from poolfile to Sequence Set
        if run_id is None:
            # Bug fix: the original assignment ended with a trailing
            # comma, which made run_id a 1-tuple instead of a string
            # (and serialized as a JSON list in sequence_set_id).
            run_id = "MapTnSeq_Barcodes_run_on_" + \
                    str(datetime.now()).replace(' ', '_')
        # output dict
        sequence_set = {
            "sequence_set_id": run_id,
            "description": "MapTnSeq (RBTNSEQ) mapping of barcodes to a " + \
                    "genome. Explanations of values: 'nTot' is total " + \
                    "number of times this barcode was counted." + \
                    " 'n' is number of times this barcode was counted" + \
                    " at this location. 'scf' is scaffold name." + \
                    " 'strand' is strand (+ or -). 'pos' is nucleotide" + \
                    " position. 'n2' is number of times found at second" + \
                    " highest counted location. 'scf2' is second highest" + \
                    " location scaffold, 'strand2' similarly, etc." + \
                    " 'nPastEnd' means number of times this barcode was" + \
                    " found next to the next part of the plasmid (barcode" + \
                    " wasn't inserted into the genome without the rest " + \
                    " of the plasmid).\n" + \
                    " User Description (optional): {}".format(description)
        }
        sequence_list = []
        with open(inp_fp_path, "r") as pool_FH:
            pool_FH.readline()  # skip the header row
            for i, c_line in enumerate(pool_FH, start=1):
                c_lst = c_line.split('\t')
                nPastEnd = c_lst[-1].rstrip()
                barcode, rcbarcode, nTot, n, scf, strnd, pos = c_lst[:7]
                n2, scf2, strnd2, pos2 = c_lst[7:-1]
                # desc_str holds all the information needed to
                # reconstruct the pool file row in conversion 1.
                desc_str = "nTot:{};n:{};scf:{};strand:{};pos:{};".format(
                        nTot, n, scf, strnd, pos)
                desc_str += "n2:{};scf2:{};strand2:{};pos2:{};".format(
                        n2, scf2, strnd2, pos2)
                desc_str += "nPastEnd:" + nPastEnd
                sequence_list.append(
                    {
                        "sequence_id": "MapTnSeq_barcode_" + str(i),
                        "description": desc_str,
                        "sequence": barcode
                    })
        sequence_set["sequences"] = sequence_list
        with open(op_path, "w") as g:
            g.write(json.dumps(sequence_set, indent=2))
        logging.info("Wrote Sequence Set JSON file to " + op_path)
    elif conversion_type == 1:
        # Going from Sequence Set to Pool File
        if inp_fp_path.split(".")[-1] != "json":
            raise Exception("Sequence Set indicated but not JSON file")
        with open(inp_fp_path) as f:
            sequence_set_d = json.loads(f.read())
        with open(op_path, "w") as out_pool_FH:
            out_pool_FH.write("barcode\trcbarcode\tnTot\tn\tscaffold\tstrand\tpos\t" + \
                    "n2\tscaffold2\tstrand2\tpos2\tnPastEnd\n")
            # Complement table hoisted out of the loop (it is invariant).
            tsl_d = {"A": "T", "T": "A", "G": "C", "C": "G"}
            for seq in sequence_set_d["sequences"]:
                barcode = seq["sequence"]
                # Reverse-complement: translate each base, then reverse.
                rcbarcode = "".join(tsl_d[ch] for ch in reversed(barcode))
                # The description encodes the remaining pool columns as
                # "key:value" pairs separated by semicolons.
                items = [x.split(":")[1] for x in seq["description"].split(";")]
                out_pool_FH.write(barcode + "\t" + rcbarcode + "\t")
                out_pool_FH.write("\t".join(items) + "\n")
        logging.info("Wrote Pool File from Sequence Set at " + op_path)
    return None
def main():
    """CLI entry point: ``conversions.py inp_fp out_fp conversion_type(0/1) 2``.

    The trailing literal "2" argument is required and enables DEBUG
    logging; the conversion type must be "0" (pool file -> sequence set)
    or "1" (sequence set -> pool file).  Anything else prints usage.
    """
    args = sys.argv
    if args[-1] == "2":
        logging.basicConfig(level=logging.DEBUG)
        if args[-2] not in ["0", "1"]:
            print("python3 conversions.py inp_fp out_fp conversion_type(0/1) 2")
        else:
            inp_fp = args[1]
            out_fp = args[2]
            conversion_type = args[3]
            convert_from_poolfile_to_sequence_set_and_back(inp_fp,
                out_fp, int(conversion_type), description="", run_id=None)
    else:
        print("python3 conversions.py inp_fp out_fp conversion_type(0/1) 2")
"""
Not in use
Inputs: custom_model_string (str) String of custom model.
Should look like the other models (2 lines, etc)
Outputs: tested_model_string (str) String of custom model.
"""
def check_custom_model(custom_model_string):
if len(custom_model_string) < 2:
raise Exception("Custom Model form incorrect, it contains fewer than 2 "
"characters while it should be at least 20 bp long.")
if len(custom_model_string.split("\n")) > 3:
raise Exception("Custom Model form incorrect- max amount of lines is 2.")
tested_model_string = custom_model_string
return tested_model_string
if __name__=="__main__":
main()
| [
"ogaloz@lbl.gov"
] | ogaloz@lbl.gov |
58677c821bfba1ed04eadabd420fd3df4576b963 | 1a5088c7858f24907f8833d4f7889ddae8203a87 | /etc/config.py | 4bd01b4d25f86c735532037d26df69c32d13e15a | [] | no_license | datatalking/roguelike | c0197e63fffc338a4d4edb14f9bb733566b029cb | dd7cda297d41c00853d015a082dd39aa14c1b8e4 | refs/heads/master | 2020-03-11T06:24:12.500005 | 2018-04-17T01:16:04 | 2018-04-17T01:16:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | SCREEN_WIDTH = 80
# Overall console height in tiles (width is SCREEN_WIDTH, defined above).
SCREEN_HEIGHT = 50

# Dungeon floor generation parameters.
FLOOR_CONFIG = dict(width=80, height=43, max_rooms=50)

# Per-room generation parameters: rooms are built from a few overlapping
# rectangles of bounded size.
ROOM_CONFIG = dict(
    width=10,
    height=10,
    max_rectangles=5,
    max_rectangle_width=3,
    max_rectangle_height=3,
)

# Entity population limits per room.
MAP_CONFIG = dict(max_monsters_per_room=3, max_items_per_room=2)

# Bottom status panel geometry; 'y' anchors it to the bottom of the screen.
PANEL_CONFIG = dict(bar_width=20, height=7)
PANEL_CONFIG['y'] = SCREEN_HEIGHT - PANEL_CONFIG['height']

# Message log area, laid out to the right of the bar inside the panel.
MESSAGE_CONFIG = dict(
    x=PANEL_CONFIG['bar_width'] + 2,
    width=SCREEN_WIDTH - PANEL_CONFIG['bar_width'] - 2,
    height=PANEL_CONFIG['height'] - 1,
)

# Field-of-view computation settings.
FOV_CONFIG = dict(algorithm='BASIC', light_walls=True, radius=10)
"matthew.drury.83@gmail.com"
] | matthew.drury.83@gmail.com |
7c541336e794e92938f2b74246ece20ead4c6d9b | e97c00e9a74e1e3f8fcc6021a30527957c0aab61 | /solutions/0_Intro_OT_sol.py | a61c7e563b24de84ae4f21a8bb8363c24936418e | [
"MIT"
] | permissive | PeterouZh/OTML_DS3_2018 | 9264ae68ac0a78319ef271ee0caa15bb33ef599e | 0c7c727528d9d88544317c2b0327d2c3d5c5a9eb | refs/heads/master | 2020-03-28T19:27:58.126010 | 2018-09-18T03:28:11 | 2018-09-18T03:28:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,008 | py |
# coding: utf-8
# # Introduction to Optimal Transport with Python
#
# #### *Rémi Flamary, Nicolas Courty*
# ## POT installation
# + Install with pip:
# ```bash
# pip install pot
# ```
# + Install with conda
# ```bash
# conda install -c conda-forge pot
# ```
# ## POT Python Optimal Transport Toolbox
#
# #### Import the toolbox
# In[1]:
import numpy as np # always need it
import scipy as sp # often use it
import pylab as pl # do the plots
import ot # ot
# #### Getting help
#
# Online documentation : [http://pot.readthedocs.io](http://pot.readthedocs.io)
#
# Or inline help:
#
# In[2]:
help(ot.dist)
# ## First OT Problem
#
# We will solve the Bakery/Cafés problem of transporting croissants from a number of Bakeries to Cafés in a City (In this case Manhattan). We did a quick google map search in Manhattan for bakeries and Cafés:
#
# 
#
# We extracted from this search their positions and generated fictional production and sale number (that both sum to the same value).
#
# We have acess to the position of Bakeries ```bakery_pos``` and their respective production ```bakery_prod``` which describe the source distribution. The Cafés where the croissants are sold are defiend also by their position ```cafe_pos``` and ```cafe_prod```. For fun we also provide a map ```Imap``` that will illustrate the position of these shops in the city.
#
#
# Now we load the data
#
#
# In[3]:
# Load the bakery/café demo data: shop positions, production/sale volumes
# (both sum to the same total), and a background map image.
data = np.load('data/manhattan.npz')
bakery_pos, bakery_prod, cafe_pos, cafe_prod, Imap = (
    data[key]
    for key in ('bakery_pos', 'bakery_prod', 'cafe_pos', 'cafe_prod', 'Imap')
)

# Show the two marginals; total production must match total sales.
print('Bakery production: {}'.format(bakery_prod))
print('Cafe sale: {}'.format(cafe_prod))
print('Total croissants : {}'.format(cafe_prod.sum()))
# #### Plotting bakeries in the city
#
# Next we plot the position of the bakeries and cafés on the map. The size of the circle is proportional to their production.
#
# In[4]:
# Figure 1: the city map with shop locations; marker area (s=) scales with
# each shop's production or sales volume.
pl.figure(1,(8,7))
pl.clf()
pl.imshow(Imap,interpolation='bilinear') # plot the map
# Bakeries in red, cafés in blue, both with black edges for contrast.
pl.scatter(bakery_pos[:,0],bakery_pos[:,1],s=bakery_prod,c='r', edgecolors='k',label='Bakeries')
pl.scatter(cafe_pos[:,0],cafe_pos[:,1],s=cafe_prod,c='b', edgecolors='k',label='Cafés')
pl.legend()
pl.title('Manhattan Bakeries and Cafés');
# #### Cost matrix
#
#
# We compute the cost matrix between the bakeries and the cafés, this will be the transport cost matrix. This can be done using the [ot.dist](http://pot.readthedocs.io/en/stable/all.html#ot.dist) that defaults to squared euclidean distance but can return other things such as cityblock (or manhattan distance).
#
#
# In[5]:
# Pairwise transport cost between bakeries (rows) and cafés (columns).
# The cityblock (Manhattan) metric suits distances in a grid-like city.
# Squared-euclidean alternative: C = ot.dist(bakery_pos, cafe_pos, 'sqeuclidean')
# (the original computed both, immediately overwriting the first — dead store removed).
C = ot.dist(bakery_pos, cafe_pos, 'cityblock')

# Figure 3: visualize the cost matrix.
pl.figure(3,(7,7))
pl.imshow(C);
pl.xlabel('Cafés')
pl.ylabel('Bakeries')
pl.title('Cost matrix')
pl.colorbar();
# #### Solving the OT problem with [ot.emd](http://pot.readthedocs.io/en/stable/all.html#ot.emd)
# In[6]:
# Solve the exact OT (earth mover's distance) linear program: G[i, j] is the
# mass (croissants) shipped from bakery i to café j at minimal total cost.
G=ot.emd(bakery_prod,cafe_prod,C)

# Figure 4: cost matrix (left) next to the optimal transport plan (right).
pl.figure(4,(14,7))
pl.subplot(1,2,1)
pl.imshow(C);
pl.xlabel('Cafés')
pl.ylabel('Bakeries')
pl.title('Cost matrix')
pl.colorbar();
pl.subplot(1,2,2)
pl.imshow(G);
pl.xlabel('Cafés')
pl.ylabel('Bakeries')
pl.title('OT matrix')
pl.colorbar();
# Left over from comparing against another plan G2 elsewhere in the notebook.
#np.abs(G-G2).max()
# #### Transportation plan vizualization
#
# A good vizualization of the OT matrix in the 2D plane is to denote the transportation of mass between a Bakery and a Café by a line. This can easily be done with a double ```for``` loop.
#
# In order to make it more interpretable one can also use the ```alpha``` parameter of plot and set it to ```alpha=G[i,j]/G[i,j].max()```.
# In[7]:
# Figure 5: draw a line for every route carrying more than `thr` mass;
# line opacity (alpha) encodes the transported amount relative to the max.
thr=0.1
mx=G.max()
pl.figure(5,(8,7))
pl.clf()
pl.imshow(Imap,interpolation='bilinear') # plot the map
pl.scatter(bakery_pos[:,0],bakery_pos[:,1],s=bakery_prod,c='r', edgecolors='k',label='Bakeries')
pl.scatter(cafe_pos[:,0],cafe_pos[:,1],s=cafe_prod,c='b', edgecolors='k',label='Cafés')
# One segment per (bakery, café) pair with non-negligible transported mass.
for i in range(G.shape[0]):
    for j in range(G.shape[1]):
        if G[i,j]>thr:
            pl.plot([bakery_pos[i,0],cafe_pos[j,0]],[bakery_pos[i,1],cafe_pos[j,1]],'k',alpha=G[i,j]/mx)
pl.legend()
pl.title('Transport between Bakeries and Cafés');
# #### OT loss and dual variables
#
# The resulting wasserstein loss loss is of the form:
#
# $W=\sum_{i,j}\gamma_{i,j}C_{i,j}$
#
# where $\gamma$ is the optimal transport matrix.
#
# In[8]:
# Wasserstein cost of the optimal plan: <C, G> = sum_ij G_ij * C_ij.
loss = (C * G).sum()
loss
# #### Regularized OT with SInkhorn
#
# The Sinkhorn algorithm is very simple to code. You can implement it directly using the following pseudo-code:
#
# 
#
# An alternative is to use the POT toolbox with [ot.sinkhorn](http://pot.readthedocs.io/en/stable/all.html#ot.sinkhorn)
#
# Be carefull to numerical problems. A good pre-provcessing for Sinkhorn is to divide the cost matrix ```C```
# by its maximum value.
# In[9]:
# Entropic regularization strength for Sinkhorn.
reg=1e-2
# Normalize the cost matrix to [0, 1] to avoid numerical under/overflow in
# the exponential below.
C0=C/C.max()
# POT's built-in equivalent, kept for reference:
#G0=ot.sinkhorn(bakery_prod,cafe_prod,C0,reg)
# Gibbs kernel K = exp(-C0 / reg).
K=np.exp(-C0/reg)
# NOTE(review): this assignment is overwritten after the loop; presumably a
# leftover from an earlier version.
G=K
niter=100
u=np.ones(C.shape[0])
# Sinkhorn iterations: alternately rescale v and u so the plan's marginals
# match cafe_prod (columns) and bakery_prod (rows).
for i in range(niter):
    v=cafe_prod/K.T.dot(u)
    u=bakery_prod/K.dot(v)
# Recover the transport plan G = diag(u) K diag(v).
G=u[:,None]*K*v[None,:]
# Transport cost of the regularized plan, measured against the original C.
loss=np.sum(C*G)

# Cost matrix (left) vs. the regularized OT plan (right).
pl.figure(4,(14,7))
pl.subplot(1,2,1)
pl.imshow(C);
pl.xlabel('Cafés')
pl.ylabel('Bakeries')
pl.title('Cost matrix')
pl.colorbar();
pl.subplot(1,2,2)
pl.imshow(G);
pl.xlabel('Cafés')
pl.ylabel('Bakeries')
pl.title('OT matrix')
pl.colorbar();

# Same line visualization as before, now for the entropic (denser) plan.
thr=0.1
mx=G.max()
pl.figure(5,(8,7))
pl.clf()
pl.imshow(Imap,interpolation='bilinear') # plot the map
pl.scatter(bakery_pos[:,0],bakery_pos[:,1],s=bakery_prod,c='r', edgecolors='k',label='Bakeries')
pl.scatter(cafe_pos[:,0],cafe_pos[:,1],s=cafe_prod,c='b', edgecolors='k',label='Cafés')
for i in range(G.shape[0]):
    for j in range(G.shape[1]):
        if G[i,j]>thr:
            pl.plot([bakery_pos[i,0],cafe_pos[j,0]],[bakery_pos[i,1],cafe_pos[j,1]],'k',alpha=G[i,j]/mx)
pl.legend()
pl.title('Transport between Bakeries and Cafés');
#np.abs(G-G2).max()
loss
| [
"zhoupengcv@sjtu.edu.cn"
] | zhoupengcv@sjtu.edu.cn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.