blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d51cada4fcc3bccaa05d0d8bcf13d87f511e4cd
|
dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5
|
/eggs/plone.indexer-1.0-py2.7.egg/plone/indexer/wrapper.py
|
af6043ef51e1809c325d4ff5c16172fda8fd7798
|
[] |
no_license
|
nacho22martin/tesis
|
ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5
|
e137eb6225cc5e724bee74a892567796166134ac
|
refs/heads/master
| 2020-12-24T13:20:58.334839
| 2013-11-09T12:42:41
| 2013-11-09T12:42:41
| 14,261,570
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,382
|
py
|
from zope.interface import implements, providedBy, Interface
from zope.interface.declarations import getObjectSpecification
from zope.interface.declarations import ObjectSpecification
from zope.interface.declarations import ObjectSpecificationDescriptor
from zope.component import adapts, queryMultiAdapter
from plone.indexer.interfaces import IIndexableObjectWrapper, IIndexableObject
from plone.indexer.interfaces import IIndexer
from Products.ZCatalog.interfaces import IZCatalog
from Products.CMFCore.utils import getToolByName
class WrapperSpecification(ObjectSpecificationDescriptor):
    """A __providedBy__ decorator that returns the interfaces provided by
    the wrapped object when asked.
    """
    def __get__(self, inst, cls=None):
        # Descriptor protocol: accessed on the class itself -> describe the
        # class; accessed on an instance -> describe the *wrapped* object.
        if inst is None:
            return getObjectSpecification(cls)
        else:
            # _IndexableObjectWrapper__object is the name-mangled form of the
            # private attribute set as self.__object in
            # IndexableObjectWrapper.__init__.
            provided = providedBy(inst._IndexableObjectWrapper__object)
            cls = type(inst)
            return ObjectSpecification(provided, cls)
class IndexableObjectWrapper(object):
    """A simple wrapper for indexable objects that will delegate to IIndexer
    adapters as appropriate.
    """
    implements(IIndexableObject, IIndexableObjectWrapper)
    adapts(Interface, IZCatalog)
    __providedBy__ = WrapperSpecification()
    def __init__(self, object, catalog):
        self.__object = object
        self.__catalog = catalog
        self.__vars = {}
        # Pre-fetch catalog variables (workflow state etc.) if a workflow
        # tool is acquirable from the catalog; otherwise leave the dict empty.
        portal_workflow = getToolByName(catalog, 'portal_workflow', None)
        if portal_workflow is not None:
            self.__vars = portal_workflow.getCatalogVariablesFor(object)
    def _getWrappedObject(self):
        """Return the object being wrapped."""
        return self.__object
    def __str__(self):
        # Delegate to the wrapped object's own __str__ when present; fall
        # back to the plain object repr-style string for this wrapper.
        try:
            return self.__object.__str__()
        except AttributeError:
            return object.__str__(self)
    def __getattr__(self, name):
        """Attribute lookup order: IIndexer adapter, then workflow variable,
        then the wrapped object itself (which may raise AttributeError).
        """
        # First, try to look up an indexer adapter
        indexer = queryMultiAdapter((self.__object, self.__catalog,), IIndexer, name=name)
        if indexer is not None:
            return indexer()
        # Then, try workflow variables
        if name in self.__vars:
            return self.__vars[name]
        # Finally see if the object provides the attribute directly. This
        # is allowed to raise AttributeError.
        return getattr(self.__object, name)
|
[
"ignacio@plone.(none)"
] |
ignacio@plone.(none)
|
667f548bbdde7dd5e71a13711858b07f4fcd4843
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2247/60716/266133.py
|
8db7c824a1b3b1a0d5f01af76ea81c3268fb320c
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
def greedy_scores(piles):
    """Simulate the two-player "stone game" with both players playing greedily.

    On each turn the current player takes the larger of the two end piles
    (ties favour the front pile).  Alex moves first; turns alternate.

    :param piles: list of pile sizes; mutated (emptied) by the simulation.
    :return: tuple ``(alex_total, lee_total)``
    """
    alex = 0
    lee = 0
    turn = 0
    while piles:
        # Take from whichever end currently holds the larger pile.
        if piles[0] >= piles[-1]:
            taken = piles.pop(0)
        else:
            taken = piles.pop()
        if turn % 2 == 0:
            alex += taken
        else:
            lee += taken
        # BUG FIX: the original never incremented its turn counter, so every
        # pile was credited to Alex and Lee always scored 0.
        turn += 1
    return alex, lee


if __name__ == '__main__':
    lists = [int(i) for i in input().split(',')]
    # Quirk preserved from the original submission: echo the parsed input
    # unless it matches one of three hard-coded judge inputs.
    if lists != [5, 4, 3, 1] and lists != [1, 2, 3, 4] and lists != [1, 1, 1, 1]:
        print(lists)
    alex, lee = greedy_scores(lists)
    print(True) if alex >= lee else print(False)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e2b8ba8c8017dc208ccc30241dc2628fc5650a09
|
dffe3e0b7f803ffbf98ef92fbee5786376873505
|
/accounts/models.py
|
352413d51cd91172e1367fcef2d8b07e808ac82b
|
[
"MIT"
] |
permissive
|
omar115/customer-management-platform
|
b853591b063594a9b0a9bf2d802c8184d9dd3637
|
1e9166c295126eafb54f6fabe1db95dbf39bb9dc
|
refs/heads/main
| 2023-06-03T15:28:56.574720
| 2021-06-18T21:02:16
| 2021-06-18T21:02:16
| 377,743,739
| 0
| 0
|
MIT
| 2021-06-18T21:02:17
| 2021-06-17T07:31:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
from django.db import models
# Create your models here.
class Customer(models.Model):
    # Customer contact record; every field is nullable so partial records
    # can be saved.
    name = models.CharField(max_length=200, null=True)
    phone = models.CharField(max_length=100, null=True)
    email = models.CharField(max_length=200, null=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    def __str__(self):
        # NOTE(review): returns None when name is unset (name is nullable),
        # which breaks admin/str display — confirm name is always populated.
        return self.name
class Tag(models.Model):
    # Free-form label attachable to products (see Product.tags M2M).
    name = models.CharField(max_length=200, null=True)
    def __str__(self):
        return self.name
class Product(models.Model):
    # Closed set of category values; stored value == display label.
    CATEGORY = (
        ('Indoor', 'Indoor'),
        ('Outdoor', 'Outdoor'),
    )
    name = models.CharField(max_length=200, null=True)
    price = models.DecimalField(max_digits=10,decimal_places=2, null=True)
    category = models.CharField(max_length=100, null=True, choices=CATEGORY)
    description = models.TextField(null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True, null=True)
    tags = models.ManyToManyField(Tag)
    def __str__(self):
        return self.name
class Order(models.Model):
    # Delivery workflow states as (stored value, display label) pairs.
    STATUS = (
        ('Pending', 'Pending'),
        ('Out for delivery', 'Out For Delivery'),
        ('Delivered', 'Delivered'),
    )
    # SET_NULL keeps the order row alive if its customer/product is deleted.
    customer = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
    product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL)
    date_created=models.DateTimeField(auto_now_add=True, null=True)
    # NOTE(review): this field is named `choices` but holds the order STATUS;
    # renaming would require a DB migration and template updates, so left as-is.
    choices = models.CharField(max_length=200, null=True, choices=STATUS)
|
[
"omarhasan115@gmail.com"
] |
omarhasan115@gmail.com
|
4a500466ac29119c0d1d6a39c3be619e931254bc
|
96e76bcb634e0e48bcf3ae352eb235ed9bc32b36
|
/app/calculations.py
|
f1c5ff008cec0df306209aafcc38a115d5490bd0
|
[] |
no_license
|
Ectroverse/EctroverseDjango
|
cef8a8a2149271c0995f1b60676f636e5dfc23ec
|
a3dad97b4e7a89694248c21df75ebdcc37e975f0
|
refs/heads/master
| 2023-04-18T21:12:20.062646
| 2021-04-28T11:06:01
| 2021-04-28T11:06:01
| 291,338,914
| 1
| 3
| null | 2021-01-23T14:32:21
| 2020-08-29T19:50:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,106
|
py
|
import numpy as np
from .constants import *
from .models import *
# This is where longer formulas should go, 1-3 liners can go inside process_tick.py
def explore_FR_cost(num_planets, num_explos_traveling):
    """Fleet-readiness cost of sending one more explorer.

    Scales with planets owned plus explorers already in flight; the flat +3
    offset is round-dependent (+3 or +10 per the original note).  For
    reference: 97 planets costs ~27% FR.
    """
    raw_cost = (num_planets + num_explos_traveling) / 4.0 + 3
    return int(np.floor(raw_cost))
def attack_FR_cost(your_planets, targets_planets,
                   target_is_empiremate=False,
                   at_war_with_empire=False,
                   empire_is_an_ally=False,
                   NAP_with_target=False,
                   CMD_RACE_SPECIAL_CULPROTECT=False,
                   targets_culture_research=0,
                   max_players_per_family=7,
                   nActive=None, nActive2=None):  # from battleReadinessLoss in battle.c
    """Fleet-readiness cost of attacking a target empire.

    FIX: the original referenced ``target_is_empiremate``, ``nActive``,
    ``max_players_per_family`` etc. as free (undefined) names and always
    raised NameError.  They are now keyword parameters with defaults that
    preserve the simplest interpretation (non-empiremate target, full
    7-player empires, everyone active), keeping existing 2-arg calls working.

    :param your_planets: attacker's planet count
    :param targets_planets: defender's planet count
    :param nActive: players active in the attacker's empire in the last 18h
        (TODO in the original: compute from activity); default = all active.
    :param nActive2: same for the defender's empire.
    :return: float FR cost, floored at 5.75 (pre-diplomacy) and capped at
        ``max_fr`` (16 for empiremates, 100 otherwise).
    """
    if nActive is None:
        nActive = max_players_per_family
    if nActive2 is None:
        nActive2 = max_players_per_family
    fa = (1 + your_planets) / (1 + targets_planets)
    fa = fa**1.3
    if target_is_empiremate:
        fb = 1.0
        max_fr = 16.0
    else:
        # Activity factor (nMax*2 - nActive)/nMax runs from 1.0 (full, fully
        # active empire) up to 2.0 (nobody active): an inactive defender
        # counts as having up to twice the planets.  With everyone active and
        # equal empire sizes, fb == fa.
        nMax = max_players_per_family
        fFactor1 = (nMax*2 - nActive) / nMax
        fFactor2 = (nMax*2 - nActive2) / nMax
        fb = (1 + your_planets*fFactor1) / (1 + targets_planets*fFactor2)
        fb = fb**1.8
        max_fr = 100.0
    # Blend fa and fb: 0.5 is a plain average; a smaller fb shifts the mix.
    fdiv = 0.5
    if fb < fa:
        fdiv = 0.5 * (fb / fa)**0.8
    fa = ( fdiv * fa ) + ( (1.0-fdiv) * fb )
    fa *= 11.5
    # Caps lower end of FR cost
    fa = max(fa, 5.75)
    # Divide by 3 if its an empiremate or you are at war with this empire
    if target_is_empiremate or at_war_with_empire:
        fa /= 3.0
    # Multiply by 3.0 if it's an ally
    elif empire_is_an_ally:
        fa *= 3.0
    # Cap lower end to 50 if you have a NAP
    elif NAP_with_target:
        fa = max(fa, 50)
    # Culture-protection racial special scales cost with the target's research.
    if CMD_RACE_SPECIAL_CULPROTECT and not empire_is_an_ally:
        fa *= np.log10(targets_culture_research + 10)
    #furti arti
    #if(( main2d->artefacts & ARTEFACT_128_BIT)&&(maind->empire != main2d->empire))
    #    fa *= log10(main2d->totalresearch[CMD_RESEARCH_CULTURE]+10);
    # Cap upper end to max_fr
    fa = min(fa, max_fr)
    return fa
def plot_attack_FR_cost():
    """Plot attack FR cost versus the attacker/defender planet ratio.

    Assumes an SS round with both players active, so the blend of the fa
    (**1.3) and fb (**1.8) curves is a plain average, scaled by 11.5 and
    clamped into [5.75, 100] exactly as attack_FR_cost does.
    """
    import matplotlib.pyplot as plt
    ratio = np.arange(0, 5, 0.1)
    cost = (0.5 * ratio**1.3 + 0.5 * ratio**1.8) * 11.5
    cost = [min(c, 100) for c in cost]
    cost = [max(c, 5.75) for c in cost]
    plt.plot(ratio, cost)
    plt.xlabel("Fraction of your planets to their planets")
    plt.ylabel("FR Cost")
    plt.grid(True)
    plt.show()
# This is currently only used for units, although its the same math as used for buildings
def unit_cost_multiplier(research_construction, research_tech, required_unit_tech):
    """Return ``(cost_multiplier, tech_penalty)`` for building a unit.

    Construction research discounts the base cost; lacking the required tech
    adds a super-linear penalty.  Returns ``(None, None)`` when the penalty
    reaches 100%, meaning the unit cannot be built at all.
    """
    multiplier = 100.0 / (100.0 + research_construction)
    penalty = 0
    shortfall = required_unit_tech - research_tech
    if shortfall > 0:
        penalty = shortfall**1.1
        if penalty >= 100:
            return None, None  # tech too low: unit is unbuildable
        multiplier *= 1.0 + 0.01*penalty
    return multiplier, np.round(penalty, 2)
def calc_max_build_from_ob(planet_size, total_built, current_ob, max_ob_percent):
    """Return how many more buildings fit under a target overbuild percentage.

    NOTE(review): ``current_ob`` is accepted but unused — kept for caller
    compatibility; confirm whether it was meant to feed the calculation.
    """
    allowed_ob = max_ob_percent / 100 + 1
    remaining = int(np.sqrt(allowed_ob) * planet_size - total_built)
    return remaining if remaining > 0 else 0
def calc_overbuild(planet_size, total_buildings):
    """Overbuild cost multiplier, ported from cmdGetBuildOvercost().

    ``total_buildings`` must include buildings under construction.  Costs are
    flat until the planet is full, then grow quadratically with the overfill.
    """
    if total_buildings <= planet_size:
        return 1
    overfill = total_buildings / planet_size
    return overfill**2
def calc_overbuild_multi(planet_size, planet_buildings, new_buildings):
    """Average overbuild multiplier for a batch of new buildings.

    ``planet_buildings`` includes existing buildings plus those under
    construction.  Replaces the C version's per-building loop with the
    closed-form sum-of-squares expression.
    """
    if new_buildings == 0:
        return 0
    # Buildings that still fit on the planet are charged at the base rate.
    at_base_rate = min(max(0, planet_size - planet_buildings), new_buildings)
    overbuilt = new_buildings - at_base_rate
    start = max(planet_buildings, planet_size)
    # Quadratic cost for the overbuilt portion, via the sum-of-squares identity.
    total = at_base_rate + (sum_of_squares(overbuilt + start) - sum_of_squares(start)) / (planet_size**2)
    return total / new_buildings
def sum_of_squares(N):
    """Closed-form sum 1^2 + 2^2 + ... + N^2 (the C original used a loop =).

    Returns a float (true division by 6 is kept deliberately).
    """
    numerator = N * (N + 1) * (2 * N + 1)
    return numerator / 6
def plot_ob():
    """Plot the overbuild multiplier vs. build count for several planet sizes,
    showing how much more valuable larger planets are.
    """
    import matplotlib.pyplot as plt
    build_counts = np.arange(3000)
    existing = 0  # start from an empty planet
    for size in [100, 200, 300]:
        curve = [calc_overbuild_multi(size, existing, n) for n in build_counts]
        plt.plot(build_counts, curve)
    plt.grid(True)
    plt.xlabel('Number of Buildings to Build')
    plt.ylabel('Total Cost Multiplier')
    plt.legend(['Planet Size: 100','Planet Size: 200','Planet Size: 300'])
    plt.show()
'''
def unit_costs(research_construction): # cmdGetBuildCosts() in cmd.c
cost = 100.0 / (100.0 + research_construction)
type &= 0xFFFF;
b++;
if( cmdUnitCost[type][0] < 0 )
{
buffer[0] = -2;
return;
}
a = cmdUnitTech[type] - maind->totalresearch[CMD_RESEARCH_TECH];
buffer[CMD_RESSOURCE_NUMUSED+1] = 0;
if( a > 0 )
{
da = pow( (double)a, 1.1 );
if( da >= 100.0 )
{
buffer[0] = -1;
return;
}
buffer[CMD_RESSOURCE_NUMUSED+1] = (int64_t)da;
cost *= 1.0 + 0.01*da;
}
for( a = 0 ; a < CMD_RESSOURCE_NUMUSED+1 ; a++ )
{
buffer[a] = ceil( cost * cmdUnitCost[type][a] );
}
}
return;
'''
def specopEnlightemntCalc(user_id, CMD_ENLIGHT_X):
    # Placeholder: enlightenment special-op calculation not yet implemented;
    # always returns the neutral value 1.  NOTE(review): the "Enlightemnt"
    # typo in the name is kept — renaming would break callers.
    return 1
def specopSolarCalc(user_id):
    # Placeholder: solar special-op calculation not yet implemented;
    # always returns the neutral value 1.
    return 1
# Kept as a free function rather than a Portal method so callers don't have
# to instantiate a Portal just to run the calculation (original author note).
def battlePortalCalc(x, y, portal_xy_list, research_culture):
    """Total portal coverage at point (x, y).

    Each portal contributes 1.0 at zero distance, decaying with the square
    root of distance; culture research widens the effective radius.
    """
    reach = 7.0 * (1.0 + 0.01 * research_culture)
    cover = 0
    for portal in portal_xy_list:
        dx, dy = x - portal[0], y - portal[1]
        dist = np.sqrt(dx**2 + dy**2)
        cover += np.max((0, 1.0 - np.sqrt(dist / reach)))
    return cover
def planet_size_distribution():
    """Draw a random planet size in [100, 500].

    A chi-square(1.25) draw makes most planets small while a tiny fraction
    are WAY bigger (exciting, especially for new players); the hard cap at
    500 keeps visualization sane.
    """
    bonus = 50 * np.random.chisquare(1.25)
    return int(min(500, 100 + bonus))
def x_move_calc(speed, x, current_position_x, y, current_position_y):
    """X component of one movement step of length ``speed`` toward (x, y).

    Returns the new x coordinate; if already vertically aligned with the
    target, snaps straight to ``x``.
    """
    dx = x - current_position_x
    if dx == 0:
        return x
    dy = y - current_position_y
    # Project the step onto the x axis: speed / sqrt(1 + (dy/dx)^2).
    move_x = speed / np.sqrt(1 + (dy / dx)**2)
    print("move_x", move_x)
    step = -move_x if x < current_position_x else move_x
    return current_position_x + step
def y_move_calc(speed, x, current_position_x, y, current_position_y):
    """Y component of one movement step of length ``speed`` toward (x, y).

    Returns the new y coordinate; if already horizontally aligned with the
    target, snaps straight to ``y``.
    """
    dy = y - current_position_y
    if dy == 0:
        return y
    dx = x - current_position_x
    # Project the step onto the y axis: speed / sqrt(1 + (dx/dy)^2).
    move_y = speed / np.sqrt(1 + (dx / dy)**2)
    print("move_y",move_y)
    step = -move_y if y < current_position_y else move_y
    return current_position_y + step
|
[
"vsavko@gmail.com"
] |
vsavko@gmail.com
|
234c1d8d515203793de7a04d88a66a7bb1373620
|
30daf732b9b2e6a38b77225cbcaa3cf8fee0e86e
|
/Binary Tree -2/Lectures/check if tree is balanced or not IMPROVED.py
|
1079696cbe9e4baad13adef02e000348a063d46f
|
[] |
no_license
|
HarikrishnaRayam/Data-Structure-and-algorithm-python
|
23b4855751dc0bc860d2221115fa447dc33add3e
|
0a2ebe6e3c7c7c56c9b7c8747a03803d6e525dda
|
refs/heads/master
| 2023-03-17T09:59:31.265200
| 2020-09-10T17:50:27
| 2020-09-10T17:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 22:29:48 2020
@author: cheerag.verma
Time Complexity = O(n)
"""
class BinaryTree:
    """A binary tree node: a payload plus optional left/right children."""
    def __init__(self, data):
        self.data = data    # payload stored at this node
        self.left = None    # left child (None when absent)
        self.right = None   # right child (None when absent)
def takeInput():
    """Read a binary tree from stdin in preorder, one integer per line.

    A value of -1 marks an absent child (returns None for that subtree).
    """
    value = int(input())
    if value == -1:
        return None
    node = BinaryTree(value)
    # Recurse left first, then right — matches the preorder input format.
    node.left = takeInput()
    node.right = takeInput()
    return node
def isBalancedOptimized(root):
    """Return ``(height, is_balanced)`` for the tree rooted at ``root``.

    Heights are computed bottom-up in the same traversal as the balance
    check, so the whole test runs in O(n) (hence "optimized").
    """
    if root is None:
        return 0, True
    left_height, left_balanced = isBalancedOptimized(root.left)
    right_height, right_balanced = isBalancedOptimized(root.right)
    height = 1 + max(left_height, right_height)
    # Balanced iff subtree heights differ by at most 1 and both subtrees
    # are themselves balanced.
    balanced = abs(left_height - right_height) <= 1 and left_balanced and right_balanced
    return height, balanced
def printTree(root):
    """Print the tree in preorder, one node per line: ``root: D-L: x,R: y``.

    Quirk preserved from the original: returns None for an empty tree and
    otherwise a bool that is True only when BOTH recursive calls returned a
    truthy value (so it is False for any real tree, since leaves' children
    return None).
    """
    if root is None:
        return
    print("root:", root.data, end="-")
    if root.left is not None:
        print("L:", root.left.data, end=",")
    if root.right is not None:
        print("R:", root.right.data, end="")
    print()
    left_result = printTree(root.left)
    right_result = printTree(root.right)
    return bool(left_result and right_result)
root = takeInput()
print(isBalancedOptimized(root))
#printTree(root)
|
[
"chiragverma188@gmail.com"
] |
chiragverma188@gmail.com
|
d2bf1e28419cf4c75659518016fe12d82d2ec4ca
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/S/Sarietha/oakleyeurope.py
|
2a8a9989fd01cfcc5984bf59c67b78c63d20201c
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,472
|
py
|
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = '@OakleyEurope'
RESULTS_PER_PAGE = '1000'
LANGUAGE = 'en'
NUM_PAGES = 2000
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
#print result
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['created_at'] = result['created_at']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
break
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = '@OakleyEurope'
RESULTS_PER_PAGE = '1000'
LANGUAGE = 'en'
NUM_PAGES = 2000
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
#print result
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
data['created_at'] = result['created_at']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
break
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
0a37e1a29baa81ce7acb8177264987c2c5588ad2
|
c7a6f8ed434c86b4cdae9c6144b9dd557e594f78
|
/ECE364/.PyCharm40/system/python_stubs/348993582/CORBA/TRANSIENT.py
|
075d3a163f32e8888c52378120610e7d07232f02
|
[] |
no_license
|
ArbalestV/Purdue-Coursework
|
75d979bbe72106975812b1d46b7d854e16e8e15e
|
ee7f86145edb41c17aefcd442fa42353a9e1b5d1
|
refs/heads/master
| 2020-08-29T05:27:52.342264
| 2018-04-03T17:59:01
| 2018-04-03T17:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,302
|
py
|
# encoding: utf-8
# module CORBA calls itself ORBit.CORBA
# from /usr/lib64/python2.6/site-packages/gtk-2.0/bonobo/_bonobo.so
# by generator 1.136
# no doc
# imports
from ORBit.CORBA import (ADD_OVERRIDE, ATTR_NORMAL, ATTR_READONLY,
AttributeDescription, AttributeMode, COMPLETED_MAYBE, COMPLETED_NO,
COMPLETED_YES, ConstantDescription, DefinitionKind, ExceptionDescription,
Initializer, InterfaceDescription, ModuleDescription, NO_EXCEPTION,
NamedValue, OP_NORMAL, OP_ONEWAY, ORB_init, OperationDescription,
OperationMode, PARAM_IN, PARAM_INOUT, PARAM_OUT, ParameterDescription,
ParameterMode, PolicyError, PrimitiveKind, SET_OVERRIDE, SYSTEM_EXCEPTION,
ServiceDetail, ServiceInformation, SetOverrideType, StructMember, TCKind,
TypeDescription, USER_EXCEPTION, UnionMember, ValueDescription,
ValueMember, completion_status, dk_AbstractInterface, dk_Alias, dk_Array,
dk_Attribute, dk_Component, dk_Constant, dk_Consumes, dk_Emits, dk_Enum,
dk_Event, dk_Exception, dk_Factory, dk_Finder, dk_Fixed, dk_Home,
dk_Interface, dk_LocalInterface, dk_Module, dk_Native, dk_Operation,
dk_Primitive, dk_Provides, dk_Publishes, dk_Repository, dk_Sequence,
dk_String, dk_Struct, dk_Typedef, dk_Union, dk_Uses, dk_Value,
dk_ValueBox, dk_ValueMember, dk_Wstring, dk_all, dk_none, exception_type,
pk_Principal, pk_TypeCode, pk_any, pk_boolean, pk_char, pk_double,
pk_float, pk_long, pk_longdouble, pk_longlong, pk_null, pk_objref,
pk_octet, pk_short, pk_string, pk_ulong, pk_ulonglong, pk_ushort,
pk_value_base, pk_void, pk_wchar, pk_wstring, tk_Principal, tk_TypeCode,
tk_abstract_interface, tk_alias, tk_any, tk_array, tk_boolean, tk_char,
tk_component, tk_double, tk_enum, tk_event, tk_except, tk_fixed, tk_float,
tk_home, tk_local_interface, tk_long, tk_longdouble, tk_longlong,
tk_native, tk_null, tk_objref, tk_octet, tk_sequence, tk_short, tk_string,
tk_struct, tk_ulong, tk_ulonglong, tk_union, tk_ushort, tk_value,
tk_value_box, tk_void, tk_wchar, tk_wstring)
from SystemException import SystemException
class TRANSIENT(SystemException):
    """Auto-generated PyCharm stub for the ORBit CORBA ``TRANSIENT`` system
    exception; the real implementation lives in the ``_bonobo.so`` C
    extension, so signatures and values below are placeholders only.
    """
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __typecode__ = None # (!) real value is ''
|
[
"pkalita@princeton.edu"
] |
pkalita@princeton.edu
|
79b927b95495699ba8dc4d1bff9f40aa07c409fd
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/MEDIUM/YW_ZXBMM_SZSJ_012.py
|
ce70e6c97112eccb70762569eb0a2745ca8a1ae9
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,989
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ZXBMM_SZSJ_012(xtp_test_case):
    # Test case YW_ZXBMM_SZSJ_012: Shenzhen-market buy order with a
    # best-or-cancel price type that is expected to end fully cancelled.
    # (All runtime strings/dict keys below are part of the test harness
    # contract and must stay in Chinese.)
    def test_YW_ZXBMM_SZSJ_012(self):
        title = '即成剩撤买-全部撤单'
        # Expected values for this test case.
        # Possible order states: initial / not traded / partly traded /
        # fully traded / partial-cancel reported / partly cancelled /
        # reported awaiting cancel / cancelled / rejected / cancel-rejected /
        # internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '已撤',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B = buy, S = sell), expected state, Api.
        stkparm = QueryStkPriceQty('999999', '2', '1', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, fail the case outright.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 200,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
ad71f9cf6ae76ff247eb8a8a293f4d39d412282b
|
1ab7b3f2aa63de8488ce7c466a67d367771aa1f2
|
/Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pandas/_typing.py
|
76ec527e6e25860f431eb6ed7e957b4dbc03bb82
|
[
"MIT"
] |
permissive
|
icl-rocketry/Avionics
|
9d39aeb11aba11115826fd73357b415026a7adad
|
95b7a061eabd6f2b607fba79e007186030f02720
|
refs/heads/master
| 2022-07-30T07:54:10.642930
| 2022-07-10T12:19:10
| 2022-07-10T12:19:10
| 216,184,670
| 9
| 1
|
MIT
| 2022-06-27T10:17:06
| 2019-10-19T09:57:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,745
|
py
|
from datetime import datetime, timedelta, tzinfo
from pathlib import Path
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Collection,
Dict,
Hashable,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
import numpy as np
# To prevent import cycles place any internal imports in the branch below
# and use a string literal forward reference to it in subsequent types
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from pandas._libs import Period, Timedelta, Timestamp # noqa: F401
from pandas.core.dtypes.dtypes import ExtensionDtype # noqa: F401
from pandas import Interval # noqa: F401
from pandas.core.arrays.base import ExtensionArray # noqa: F401
from pandas.core.frame import DataFrame # noqa: F401
from pandas.core.generic import NDFrame # noqa: F401
from pandas.core.indexes.base import Index # noqa: F401
from pandas.core.series import Series # noqa: F401
# array-like
# (These are *constrained* TypeVars, not Unions: every occurrence in one
# signature must resolve to the same concrete type.)
AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)
ArrayLike = TypeVar("ArrayLike", "ExtensionArray", np.ndarray)
# scalars
PythonScalar = Union[str, int, float, bool]
DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", "Period", "Timestamp", "Timedelta")
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
Scalar = Union[PythonScalar, PandasScalar]
# timestamp and timedelta convertible types
TimestampConvertibleTypes = Union[
    "Timestamp", datetime, np.datetime64, int, np.int64, float, str
]
TimedeltaConvertibleTypes = Union[
    "Timedelta", timedelta, np.timedelta64, int, np.int64, float, str
]
Timezone = Union[str, tzinfo]
# other
Dtype = Union[
    "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool]]
]
DtypeObj = Union[np.dtype, "ExtensionDtype"]
FilePathOrBuffer = Union[str, Path, IO[AnyStr]]
# FrameOrSeriesUnion means either a DataFrame or a Series. E.g.
# `def func(a: FrameOrSeriesUnion) -> FrameOrSeriesUnion: ...` means that if a Series
# is passed in, either a Series or DataFrame is returned, and if a DataFrame is passed
# in, either a DataFrame or a Series is returned.
FrameOrSeriesUnion = Union["DataFrame", "Series"]
# FrameOrSeries is stricter and ensures that the same subclass of NDFrame always is
# used. E.g. `def func(a: FrameOrSeries) -> FrameOrSeries: ...` means that if a
# Series is passed into a function, a Series is always returned and if a DataFrame is
# passed in, a DataFrame is always returned.
FrameOrSeries = TypeVar("FrameOrSeries", bound="NDFrame")
# axis/label/level aliases used throughout indexing code
Axis = Union[str, int]
Label = Optional[Hashable]
Level = Union[Label, int]
Ordered = Optional[bool]
JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
Axes = Collection
# For functions like rename that convert one label to another
Renamer = Union[Mapping[Label, Any], Callable[[Label], Label]]
# to maintain type information across generic functions and parametrization
T = TypeVar("T")
# used in decorators to preserve the signature of the function it decorates
# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
# types of vectorized key functions for DataFrame::sort_values and
# DataFrame::sort_index, among others
ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]]
IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]]
# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
AggFuncTypeBase = Union[Callable, str]
AggFuncType = Union[
    AggFuncTypeBase,
    List[AggFuncTypeBase],
    Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]],
]
|
[
"kd619@ic.ac.uk"
] |
kd619@ic.ac.uk
|
915f750c2d95faa2dda3d6014a9953a3ca6040ca
|
29cbf374b3c08e17c559b1870370d6db01369bc2
|
/PyCrowlingo/ApiModels/SearchEngine/Responses.py
|
0c43c592cf733a4bc49aa6890ac10275db7fbaa0
|
[] |
no_license
|
Crowlingo/PyCrowlingo
|
f12900dd44da1f04a5d55b064a56fdd5f5918bb6
|
8cd134f8527ae2eebefaaa8c42eb2d79751b26b3
|
refs/heads/master
| 2023-07-04T08:26:06.176478
| 2021-08-11T17:27:55
| 2021-08-11T17:27:55
| 266,834,155
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
from .Examples import Responses as Examples
from ..Attributes import LangSearchResult, DocumentsId
class Search(Examples.Search, LangSearchResult):
    # Search response model: example payload (Examples.Search) mixed with the
    # LangSearchResult attributes.
    pass
class CreateDocuments(Examples.CreateDocuments, DocumentsId):
    # Create-documents response: example payload plus the created DocumentsId.
    pass
class CreateKeywords(Examples.CreateKeywords, DocumentsId):
    # Create-keywords response.  NOTE(review): mixes in DocumentsId (not a
    # keyword-specific id type) — presumably keywords reuse document ids;
    # confirm against the Attributes module.
    pass
class DeleteDocuments(Examples.DeleteDocuments, DocumentsId):
    # Delete-documents response: example payload plus the affected DocumentsId.
    pass
|
[
"jonas.bouaziz@epita.fr"
] |
jonas.bouaziz@epita.fr
|
937f72f20da6e97acbef35888d289f6c903eb380
|
722b85a88be1688974a87fc307887a1ed3d0e0af
|
/swagger_server/models/data_entry.py
|
cfaca829dad7ddc53c62a935cf1160bfd295fb81
|
[
"MIT"
] |
permissive
|
Capping-WAR/API
|
579d9a354c5b947efad9759939a8c020298bfe76
|
981823732f2b4f8bc007da657d5195579eb7dad3
|
refs/heads/master
| 2020-07-28T05:43:23.062068
| 2019-11-30T20:50:44
| 2019-11-30T20:50:44
| 209,327,306
| 0
| 0
|
MIT
| 2019-11-19T22:22:09
| 2019-09-18T14:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,930
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class DataEntry(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, sentence_id: int=None, sentence: str=None, rule_correct_id: int=None, rule_correct: int=None, date_added: str=None): # noqa: E501
"""DataEntry - a model defined in Swagger
:param sentence_id: The sentence_id of this DataEntry. # noqa: E501
:type sentence_id: int
:param sentence: The sentence of this DataEntry. # noqa: E501
:type sentence: str
:param rule_correct_id: The rule_correct_id of this DataEntry. # noqa: E501
:type rule_correct_id: int
:param rule_correct: The rule_correct of this DataEntry. # noqa: E501
:type rule_correct: int
:param date_added: The date_added of this DataEntry. # noqa: E501
:type date_added: str
"""
self.swagger_types = {
'sentence_id': int,
'sentence': str,
'rule_correct_id': int,
'rule_correct': int,
'date_added': str
}
self.attribute_map = {
'sentence_id': 'sentenceID',
'sentence': 'sentence',
'rule_correct_id': 'ruleCorrectID',
'rule_correct': 'ruleCorrect',
'date_added': 'dateAdded'
}
self._sentence_id = sentence_id
self._sentence = sentence
self._rule_correct_id = rule_correct_id
self._rule_correct = rule_correct
self._date_added = date_added
@classmethod
def from_dict(cls, dikt) -> 'DataEntry':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The dataEntry of this DataEntry. # noqa: E501
:rtype: DataEntry
"""
return util.deserialize_model(dikt, cls)
# Generated accessors: every field is required by the Swagger schema,
# so each setter rejects None.
@property
def sentence_id(self) -> int:
    """Gets the sentence_id of this DataEntry.

    Unique ID of the sentence that was submitted for review  # noqa: E501

    :return: The sentence_id of this DataEntry.
    :rtype: int
    """
    return self._sentence_id

@sentence_id.setter
def sentence_id(self, sentence_id: int):
    """Sets the sentence_id of this DataEntry.

    Unique ID of the sentence that was submitted for review  # noqa: E501

    :param sentence_id: The sentence_id of this DataEntry.
    :type sentence_id: int
    """
    if sentence_id is None:
        raise ValueError("Invalid value for `sentence_id`, must not be `None`")  # noqa: E501
    self._sentence_id = sentence_id

@property
def sentence(self) -> str:
    """Gets the sentence of this DataEntry.

    The user submitted sentence to be reviewed  # noqa: E501

    :return: The sentence of this DataEntry.
    :rtype: str
    """
    return self._sentence

@sentence.setter
def sentence(self, sentence: str):
    """Sets the sentence of this DataEntry.

    The user submitted sentence to be reviewed  # noqa: E501

    :param sentence: The sentence of this DataEntry.
    :type sentence: str
    """
    if sentence is None:
        raise ValueError("Invalid value for `sentence`, must not be `None`")  # noqa: E501
    self._sentence = sentence

@property
def rule_correct_id(self) -> int:
    """Gets the rule_correct_id of this DataEntry.

    the ID of the rule that graded  # noqa: E501

    :return: The rule_correct_id of this DataEntry.
    :rtype: int
    """
    return self._rule_correct_id

@rule_correct_id.setter
def rule_correct_id(self, rule_correct_id: int):
    """Sets the rule_correct_id of this DataEntry.

    the ID of the rule that graded  # noqa: E501

    :param rule_correct_id: The rule_correct_id of this DataEntry.
    :type rule_correct_id: int
    """
    if rule_correct_id is None:
        raise ValueError("Invalid value for `rule_correct_id`, must not be `None`")  # noqa: E501
    self._rule_correct_id = rule_correct_id

@property
def rule_correct(self) -> int:
    """Gets the rule_correct of this DataEntry.

    the consensus on the correctness of the senetence for the rule; 1 = correct; 0 = incorrect  # noqa: E501

    :return: The rule_correct of this DataEntry.
    :rtype: int
    """
    return self._rule_correct

@rule_correct.setter
def rule_correct(self, rule_correct: int):
    """Sets the rule_correct of this DataEntry.

    the consensus on the correctness of the senetence for the rule; 1 = correct; 0 = incorrect  # noqa: E501

    :param rule_correct: The rule_correct of this DataEntry.
    :type rule_correct: int
    """
    if rule_correct is None:
        raise ValueError("Invalid value for `rule_correct`, must not be `None`")  # noqa: E501
    self._rule_correct = rule_correct

@property
def date_added(self) -> str:
    """Gets the date_added of this DataEntry.

    Date added to the database  # noqa: E501

    :return: The date_added of this DataEntry.
    :rtype: str
    """
    return self._date_added

@date_added.setter
def date_added(self, date_added: str):
    """Sets the date_added of this DataEntry.

    Date added to the database  # noqa: E501

    :param date_added: The date_added of this DataEntry.
    :type date_added: str
    """
    if date_added is None:
        raise ValueError("Invalid value for `date_added`, must not be `None`")  # noqa: E501
    self._date_added = date_added
|
[
"daniel.gisolfi1@marist.edu"
] |
daniel.gisolfi1@marist.edu
|
1d07a990fcad3988c9905f19888bd2f562286804
|
12971fc2b1426f3d3a52039f21c4c2d7bb820f68
|
/ProjectEuler/p001/sum_multiples.py
|
3ec6d737728ecde60f3348e4ed294d3faa55f60d
|
[
"MIT"
] |
permissive
|
adrianogil/AlgoExercises
|
29b3c64e071008bffbfe9273130f980100381deb
|
be1d8d22eedade2e313458e8d89185452d9da194
|
refs/heads/main
| 2023-08-18T23:31:51.463767
| 2023-07-22T18:02:46
| 2023-07-22T18:02:46
| 86,254,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
# https://projecteuler.net/problem=1
# If we list all the natural numbers below 10 that are multiples of 3 or 5,
# we get 3, 5, 6 and 9. The sum of these multiples is 23.


def sum_multiples_3_or_5_below(below_target):
    """Return the sum of all natural numbers below *below_target* that are
    multiples of 3 or 5.

    :param below_target: exclusive upper bound (non-positive values give 0)
    :return: the sum as an int
    """
    # Built-in sum over a generator replaces the manual accumulator loop.
    return sum(n for n in range(below_target) if n % 3 == 0 or n % 5 == 0)


if __name__ == '__main__':
    assert sum_multiples_3_or_5_below(10) == 23
    # Find the sum of all the multiples of 3 or 5 below 1000.
    # (Fixed: the original passed the value as a second print() argument
    # alongside a placeholder-free f-string; output text is unchanged.)
    print(f'The sum of all the multiples of 3 or 5 below 1000 is '
          f'{sum_multiples_3_or_5_below(1000)}')
|
[
"adrianogil.san@gmail.com"
] |
adrianogil.san@gmail.com
|
e6e09c8f6ec320e16d3d27bb48752350f7e3e3da
|
7c0820998f6ed2f1f5ee82b8b7ffd67c3228bfb6
|
/pygame/plane_sprites_03_enemy.py
|
91bf620db5da622afeaa52cb838577e18a63260a
|
[] |
no_license
|
youinmelin/practice2020
|
5127241eaccf3ec997bb10671008a9a7c5f9d741
|
47d376b6d264141c229b6afcc2be803f41fd611e
|
refs/heads/master
| 2022-12-12T00:28:22.293373
| 2020-09-22T08:29:37
| 2020-09-22T08:29:37
| 237,427,204
| 0
| 0
| null | 2022-11-04T19:10:12
| 2020-01-31T12:38:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,788
|
py
|
import pygame
import random
SCREEN_RECT = pygame.Rect(0, 0, 480, 700)
CREATE_ENEMY_EVENT = pygame.USEREVENT
HERO_FIRE_EVENT = pygame.USEREVENT + 1
class GameSprite(pygame.sprite.Sprite):
    """Base sprite for the plane game: an image that moves vertically."""
    def __init__(self,image_name,speed=1):
        # Invoke the parent (pygame.sprite.Sprite) initializer first.
        super().__init__()
        self.image = pygame.image.load(image_name)
        self.rect = self.image.get_rect()
        # Vertical speed in pixels per update; positive moves down the screen.
        self.speed = speed
    def update(self):
        # Move the sprite down by its speed each frame.
        self.rect.y += self.speed
class Hero(GameSprite):
    """The player's plane: clamped to the screen, animated, fires bullets."""
    def __init__(self,speed = 0):
        # Invoke the parent initializer with the hero image.
        super().__init__('images\\plane_images\\me1.png')
        self.speed = speed
        # Start at the bottom edge of the screen.
        self.rect.y = SCREEN_RECT.bottom - self.rect.height
        # Bullets fired by the hero live in this group.
        self.bullet_group = pygame.sprite.Group()
        # Frame counter driving the two-frame propeller animation.
        self.i = 1
    def update(self):
        # Clamp horizontal movement to the screen, otherwise apply speed.
        if self.rect.right > SCREEN_RECT.width:
            self.rect.x = SCREEN_RECT.width- self.rect.width
        elif self.rect.left < 0:
            self.rect.x = 0
        else:
            self.rect.x += self.speed
        # Alternate the two hero images every 10 updates (i % 20 in 0..9).
        if self.i % 20 in range(10):
            self.image = pygame.image.load('images\\plane_images\\me1.png')
        else:
            self.image = pygame.image.load('images\\plane_images\\me2.png')
        self.i += 1
    def change(self):
        # Swap to the destroyed-plane image once the frame counter exceeds
        # 500.  NOTE(review): must be called explicitly; confirm the
        # intended trigger for destruction.
        if self.i > 500:
            self.image = pygame.image.load('images\\plane_images\\me_destroy_3.png')
    def fire(self):
        # Fire a burst of three bullets stacked 15 px apart above the plane.
        for i in range(0,3):
            bullet = Bullet(self.rect.centerx,self.rect.y-i*15)
            self.bullet_group.add(bullet)
class BackGround(GameSprite):
    """Scrolling background image; wraps above the screen to loop."""
    def __init__(self,pos_y = 0):
        # Invoke the parent initializer with the background image.
        super().__init__('images\\plane_images\\background.png')
        self.pos_y = pos_y
        self.rect.y = self.pos_y
    def update(self):
        self.rect.y += self.speed
        # Once fully below the visible area, jump back above it so the
        # background scroll repeats seamlessly.
        if self.rect.y >= SCREEN_RECT.height:
            self.rect.y = -SCREEN_RECT.height
class Bullet(GameSprite):
    """A bullet fired by the hero; travels upward and kills itself off-screen."""
    def __init__(self,pos_x, pos_y):
        # Invoke the parent initializer with the bullet image.
        super().__init__('images\\plane_images\\bullet1.png')
        # Negative speed: bullets move up the screen.
        self.speed = -2
        # Center the bullet horizontally on the given x coordinate.
        self.pos_x = pos_x - self.rect.centerx
        self.pos_y = pos_y
        self.rect.x = self.pos_x
        self.rect.y = self.pos_y
    def update(self):
        super().update()
        # if bullets are out of the screen, delete them from group
        if self.rect.bottom < 0:
            self.kill()
class Enemy(GameSprite):
    """Enemy plane spawned at a random x position along the top edge."""
    def __init__(self):
        super().__init__('images\\plane_images\\enemy1.png')
        self.rect.x = random.randint(0, SCREEN_RECT.width - self.rect.width)
        self.rect.y = 0
    def update(self):
        super().update()
        # NOTE(review): speed is set *after* the move above, so the very
        # first update still uses GameSprite's default speed of 1 —
        # confirm this is intended.
        self.speed = 2
if __name__ == '__main__':
pass
|
[
"ygqs@sina.com"
] |
ygqs@sina.com
|
1f0e1b078a3e7b081475307d57849c0b378bbb8e
|
f8580d2c963b6a3c34e918e0743d0a503a9584bd
|
/unittests/test_scrolwin.py
|
117f89e77c3ddf2e066ffd7cbf693e8db8bb1974
|
[] |
no_license
|
pypy/wxpython-cffi
|
f59c3faeed26e6a26d0c87f4f659f93e5366af28
|
877b7e6c1b5880517456f1960db370e4bb7f5c90
|
refs/heads/master
| 2023-07-08T21:13:22.765786
| 2016-12-02T22:10:45
| 2016-12-02T22:10:45
| 397,124,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
import imp_unittest, unittest
import wtc
import wx
#---------------------------------------------------------------------------
class scrolwin_Tests(wtc.WidgetTestCase):
    """Exercises wx.ScrolledWindow / wx.ScrolledCanvas scroll geometry."""
    def commonBits(self, w):
        # Shared checks: set up a 750x750 virtual area with 20 px scroll
        # units, scroll to (3, 3) scroll units, then verify the scrolled /
        # unscrolled coordinate conversions (both int-pair and Point forms).
        vsize = 750
        rate = 20
        w.SetSize(self.frame.GetClientSize())
        w.EnableScrolling(True, True)
        w.ShowScrollbars(wx.SHOW_SB_ALWAYS, wx.SHOW_SB_ALWAYS)
        w.SetVirtualSize((vsize, vsize))
        w.SetScrollRate(rate, rate)
        w.Scroll(3,3) # in scroll units
        self.myYield()
        self.assertEqual(w.GetVirtualSize(), (vsize,vsize))
        self.assertEqual(w.GetScrollPixelsPerUnit(), (rate,rate))
        self.assertEqual(w.GetViewStart(), (3,3)) # scroll units
        self.assertEqual(w.CalcScrolledPosition(0,0), (-3*rate,-3*rate)) # pixels
        self.assertEqual(w.CalcUnscrolledPosition(0,0),(3*rate,3*rate)) # pixels
        # also test the Point overloads
        self.assertEqual(w.CalcScrolledPosition( (0,0) ), (-3*rate,-3*rate)) # pixels
        self.assertEqual(w.CalcUnscrolledPosition( (0,0) ),(3*rate,3*rate)) # pixels
    def test_scrolwinCtor(self):
        # One-step construction.
        w = wx.ScrolledWindow(self.frame)
        self.commonBits(w)
    def test_scrolwinDefaultCtor(self):
        # Two-step construction (default ctor followed by Create).
        w = wx.ScrolledWindow()
        w.Create(self.frame)
        self.commonBits(w)
    def test_scrolcvsCtor(self):
        w = wx.ScrolledCanvas(self.frame)
        self.commonBits(w)
    def test_scrolcvsDefaultCtor(self):
        w = wx.ScrolledCanvas()
        w.Create(self.frame)
        self.commonBits(w)
    def test_scrolwinOnDraw(self):
        # Verifies that a subclass's OnDraw override is invoked on refresh.
        class MyScrolledWin(wx.ScrolledWindow):
            def __init__(self, *args, **kw):
                wx.ScrolledWindow.__init__(self, *args, **kw)
                self.flag = False
            def OnDraw(self, dc):
                self.flag = True
                sz = dc.GetSize()
                dc.SetPen(wx.Pen('blue', 3))
                dc.DrawLine(0, 0, sz.width, sz.height)
        w = MyScrolledWin(self.frame)
        self.commonBits(w)
        w.Refresh()
        self.myUpdate(w)
        self.myYield()
        self.assertTrue(w.flag) # True if OnDraw was called
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
[
"wayedt@gmail.com"
] |
wayedt@gmail.com
|
40ebe62b6d5033e8e10ea24956730f3e06ee69b1
|
a6aaca52563f35a05f37dcd27237e0c04a5427a9
|
/integration/integration.py
|
42354a7aa9db9c73d1ccb007463d032aff974f0a
|
[
"Apache-2.0"
] |
permissive
|
backwardn/s2
|
4395cf17ca2e711e6f35f6c164761f21aebb0163
|
d52f35094520fd992cb2a114dc8896b91978524d
|
refs/heads/master
| 2022-10-11T07:58:42.049661
| 2020-06-09T18:33:54
| 2020-06-09T18:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,268
|
py
|
#!/usr/bin/env python3
import os
import sys
import argparse
import subprocess
from urllib.parse import urlparse
ROOT = os.path.dirname(os.path.abspath(__file__))
TESTDATA = os.path.join(ROOT, "testdata")
def main():
    """Parse command-line options and run the selected integration suites.

    Exits with status 1 if any suite's subprocess fails.
    """
    parser = argparse.ArgumentParser(description="Runs the s2 integration test suite.")
    parser.add_argument("address", help="Address of the s2 instance")
    parser.add_argument("--access-key", default="", help="Access key")
    parser.add_argument("--secret-key", default="", help="Secret key")
    parser.add_argument("--test", default=None, help="Run a specific test")
    args = parser.parse_args()
    # --test accepts either "suite" or "suite:test"
    # (suites: python / go / cli).
    suite_filter = None
    test_filter = None
    if args.test is not None:
        parts = args.test.split(":", maxsplit=1)
        if len(parts) == 1:
            suite_filter = parts[0]
        else:
            suite_filter, test_filter = parts
    # Create some sample data if it doesn't exist yet
    if not os.path.exists(TESTDATA):
        os.makedirs(TESTDATA)
        with open(os.path.join(TESTDATA, "small.txt"), "w") as f:
            f.write("x")
        with open(os.path.join(TESTDATA, "large.txt"), "w") as f:
            f.write("x" * (65 * 1024 * 1024))
    # Expose the target instance and credentials to the suites via env vars.
    url = urlparse(args.address)
    env = dict(os.environ)
    env["S2_HOST_ADDRESS"] = args.address
    env["S2_HOST_NETLOC"] = url.netloc
    env["S2_HOST_SCHEME"] = url.scheme
    env["S2_ACCESS_KEY"] = args.access_key
    env["S2_SECRET_KEY"] = args.secret_key
    def run(cwd, *args):
        # Run a command inside a subdirectory of this repo; raise on failure.
        subprocess.run(args, cwd=os.path.join(ROOT, cwd), env=env, check=True)
    try:
        if suite_filter is None or suite_filter == "python":
            args = ["-k", test_filter] if test_filter is not None else []
            run("python", os.path.join("venv", "bin", "pytest"), "test.py", *args)
        if suite_filter is None or suite_filter == "go":
            args = ["-count=1"]
            if test_filter is not None:
                args.append("-run={}".format(test_filter))
            args.append("./...")
            run("go", "go", "test", *args)
        if suite_filter is None or suite_filter == "cli":
            run("cli", "bash", "test.sh")
    except subprocess.CalledProcessError:
        sys.exit(1)
if __name__ == "__main__":
main()
|
[
"simonson@gmail.com"
] |
simonson@gmail.com
|
8858af318d2749519148dd9344b4630cad0dd249
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/meetup/3301efd411054cab9d496465a25695fc.py
|
bedae31529510afb9d51cb80c1b95b6583865644
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
"""
Utility to get the date of meetups.
Written by Bede Kelly for Exercism.
"""
import datetime
from calendar import day_name, monthrange
__author__ = "Bede Kelly"
def meetup_day(year, month, weekday, selector):
    """Return the date of a meetup.

    *weekday* is a day name ("Monday", ...); *selector* is one of
    "teenth", "1st", "2nd", "3rd", "4th" or "last".
    """
    names = list(day_name)
    target = names.index(weekday)
    if selector == "teenth":
        # The "teenth" day is the unique 13th..19th falling on *weekday*.
        for dom in range(13, 20):
            candidate = datetime.date(year, month, dom)
            if candidate.weekday() == target:
                return candidate
    else:
        offsets = {
            "1st": 0,
            "2nd": 1,
            "3rd": 2,
            "4th": 3,
            "last": -1,
        }
        last_dom = monthrange(year, month)[1]
        # All dates in the month that fall on the requested weekday.
        matches = [datetime.date(year, month, dom)
                   for dom in range(1, last_dom + 1)
                   if datetime.date(year, month, dom).weekday() == target]
        return matches[offsets[selector]]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
3b8bf356fc0ab962f89c390875463e6796501f66
|
80888878480eb75e7e55a1f1cbc7273bf176a146
|
/pyclaw_profiler.py
|
5324d0ae2f57f6962aa10e6973d395a57db05afb
|
[] |
no_license
|
ketch/pyclaw-scaling
|
eff0ea15459d6266cf222a9a2461abe2fc1831a9
|
633510b902113c80b6111fcaf990987f8ca99fa2
|
refs/heads/master
| 2021-01-19T08:10:46.509637
| 2014-11-19T13:02:28
| 2014-11-19T13:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,229
|
py
|
#!/usr/bin/env python
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
import pstats
params = {'backend': 'ps',
'axes.labelsize': 10,
'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,}
matplotlib.rcParams.update(params)
#Some simple functions to generate colours.
def pastel(colour, weight=2.4):
    """ Convert colour into a nice pastel shade"""
    # `colour` may be anything matplotlib's colorConverter accepts
    # (a name, hex string, or rgb tuple).
    rgb = np.asarray(colorConverter.to_rgb(colour))
    # scale colour
    #maxc = max(rgb)
    #if maxc < 1.0 and maxc > 0:
    #    # scale colour
    #    scale = 1.0 / maxc
    #    rgb = rgb * scale
    # now decrease saturation
    total = sum(rgb)
    slack = 0
    for x in rgb:
        slack += 1.0 - x
    # want to increase weight from total to weight
    # pick x s.t. slack * x == weight - total
    # x = (weight - total) / slack
    x = (weight - total) / slack
    # Blend each channel 75% of the way toward the value that would bring
    # the channel sum up to `weight`, lightening the colour.
    rgb = [c + 0.75*(x * (1.0-c)) for c in rgb]
    return rgb
def get_colours(n):
    """ Return n pastel colours. """
    base = np.asarray([[0.8,0.8,0], [0.8,0,0.8], [0,0.8,0.8]])
    if n <= 3:
        return base[0:n]
    # how many new colours do we need to insert between
    # red and green and between green and blue?
    # NOTE(review): this relies on Python 2 integer division (the file also
    # uses iterkeys/iteritems); under Python 3 these become floats and the
    # range() below would fail.
    needed = (((n - 3) + 1) / 2, (n - 3) / 2)
    colours = []
    # Linearly interpolate extra colours between successive base colours.
    for start in (0, 1):
        for x in np.linspace(0, 1-(1.0/(needed[start]+1)), needed[start]+1):
            colours.append((base[start] * (1.0 - x)) +
                           (base[start+1] * x))
    colours.append([0,0,1])
    return [pastel(c) for c in colours[0:n]]
time_components = {
'CFL reduce' : "<method 'max' of 'petsc4py.PETSc.Vec' objects>",
'Parallel initialization' : "<method 'create' of 'petsc4py.PETSc.DA' objects>",
'Ghost cell communication' : "<method 'globalToLocal' of 'petsc4py.PETSc.DM' objects>",
'Time evolution' : "evolve_to_time",
'setup' : "setup"
}
def extract_profile(nsteps=200,ndim=3,solver_type='sharpclaw',nvals=(1,3,4),process_rank=0):
    """Read cProfile dumps from a scaling run and split time by component.

    Returns (nprocs, times): nprocs is a list of core-count labels, and
    times maps component name -> list of cumulative seconds, one entry per
    run in nvals.  NOTE(review): Python 2 only (iterkeys/iteritems).
    """
    stats_dir = './scaling'+str(nsteps)+'_'+str(ndim)+'d_'+str(solver_type)
    times = {}
    for key in time_components.iterkeys():
        times[key] = []
    times['Concurrent computations'] = []
    nprocs = []
    for n in nvals:
        # Each run uses 2**(3n) processes on a grid of 2**(6+n) cells per side.
        num_processes = 2**(3*n)
        num_cells = 2**(6+n)
        nprocs.append(str(num_processes))
        prof_filename = os.path.join(stats_dir,'statst_'+str(num_processes)+'_'+str(num_cells)+'_'+str(process_rank))
        profile = pstats.Stats(prof_filename)
        # Build a method-name -> cumulative-time map for this dump.
        prof = {}
        for key, value in profile.stats.iteritems():
            method = key[2]
            cumulative_time = value[3]
            prof[method] = cumulative_time
        for component, method in time_components.iteritems():
            times[component].append(round(prof[method],1))
    # Time not spent in communication or the CFL reduction is counted as
    # concurrent computation.
    times['Concurrent computations'] = [ times['Time evolution'][i]
                                         - times['CFL reduce'][i]
                                         - times['Ghost cell communication'][i]
                                         for i in range(len(times['Time evolution']))]
    return nprocs,times
def plot_and_table(nsteps=200,ndim=3,solver_type='sharpclaw',nvals=(1,3,4),process_rank=0):
    """Plot a stacked bar chart of the per-component time breakdown for each
    core count, with a data table (including parallel efficiency) below the
    axes, and save it as a PDF.
    """
    nprocs, times = extract_profile(nsteps,ndim,solver_type,nvals,process_rank)
    rows = ['Concurrent computations',
            'Parallel initialization',
            'Ghost cell communication',
            'CFL reduce']
    # Get some pastel shades for the colours
    colours = get_colours(len(rows))
    nrows = len(rows)
    x_bar = np.arange(len(nprocs)) + 0.3  # the x locations for the groups
    bar_width = 0.4
    yoff = np.array([0.0] * len(nprocs))  # the bottom values for stacked bar chart
    plt.axes([0.35, 0.25, 0.55, 0.35])    # leave room below the axes for the table
    # Stack one bar segment per component.
    for irow,row in enumerate(rows):
        plt.bar(x_bar, times[row], bar_width, bottom=yoff, color=colours[irow], linewidth=0)
        yoff = yoff + times[row]
    table_data = [times[row] for row in rows]
    # Add total efficiency to the table_data
    table_data.append( np.array([sum([row[i] for row in table_data]) for i in range(len(nprocs))]))
    # Efficiency = total time of the smallest run / total time of this run.
    table_data[-1] = table_data[-1][0]/table_data[-1]
    table_data[-1] = [round(x,2) for x in table_data[-1]]
    rows.append('Parallel efficiency')
    colours.append([1,1,1])
    # Add a table at the bottom of the axes
    mytable = plt.table(cellText=table_data,
                        rowLabels=rows, rowColours=colours,
                        colLabels=nprocs,
                        loc='bottom').set_fontsize(8)
    plt.ylabel('Execution Time for Process '+ str(process_rank)+' (s)')
    plt.figtext(.5, .02, "Number of Cores", fontsize=10)
    vals = np.arange(0, 36, 5)
    plt.yticks(vals, ['%d' % val for val in vals])
    plt.xticks([])
    plt.draw()
    f=plt.gcf()
    f.set_figheight(5)
    f.set_figwidth(5)
    plt.savefig('scaling_'+solver_type+'_'+str(ndim)+'D.pdf')
|
[
"dketch@gmail.com"
] |
dketch@gmail.com
|
fb8910d631fb39f1201915c814f6f177ff8e36fe
|
d6ed05e23faa20beb5e47624870608a9219ea81c
|
/TuningTools_old/scripts/skeletons/datacurator.py
|
554954e0e803f3f13f472feaba5219997fc0f3e9
|
[] |
no_license
|
kaducovas/ringer
|
f6495088c0d54d622dcc707333b4c2fbf132d65f
|
603311caab016ad0ef052ea4fcc605c5ac4e494b
|
refs/heads/master
| 2020-06-16T21:37:15.228364
| 2019-07-08T01:29:57
| 2019-07-08T01:29:57
| 195,477,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,949
|
py
|
from RingerCore import masterLevel, LoggingLevel, keyboard
from TuningTools import *
import ROOT
# Debug/skeleton script: recompute decision outputs for one (et, eta) bin
# and cross-check them against the summary info stored in a tuned
# discriminator archive.  Python 2 (print statements).
ROOT.TH1.AddDirectory(ROOT.kFALSE)
ROOT.gROOT.SetBatch(ROOT.kTRUE)
masterLevel.set( LoggingLevel.VERBOSE )
# FIXME Go to data curator and force multiStop if needed
# NOTE(review): `kw` and `dataLocation` are not defined in this file —
# presumably supplied by whatever drives this skeleton; verify before use.
dCurator = DataCurator( kw, dataLocation = dataLocation )
sort = 0
#dCurator.crossValid._sort_boxes_list[sort] = [3, 4, 5, 7, 8, 9, 0, 1, 2, 6]
dCurator.prepareForBin( etBinIdx = 2, etaBinIdx = 0
                      , loadEfficiencies = True, loadCrossEfficiencies = False )
dCurator.toTunableSubsets( sort, PreProcChain( RingerEtaMu() ) )
td = TunedDiscrArchieve.load('/home/wsfreund/junk/tuningDataTest/networks2/nn.tuned.pp-ExNREM.hn0010.s0000.il0000.iu0002.et0002.eta0000.pic.gz')
decisionMaker = DecisionMaker( dCurator, {}, removeOutputTansigTF = True, pileupRef = 'nvtx' )
tunedDict = td.getTunedInfo( 10, 0, 2 )
# Stored train/validation outputs for the three tuned networks
# (named SP, Pd and Pf in this file).
tunedDiscrSP = tunedDict['tunedDiscr'][0]
sInfoSP = tunedDiscrSP['summaryInfo']
trnOutSP = [npCurrent.fp_array( a ) for a in sInfoSP['trnOutput']]
valOutSP = [npCurrent.fp_array( a ) for a in sInfoSP['valOutput']]
tunedDiscrPd = tunedDict['tunedDiscr'][1]
sInfoPd = tunedDiscrPd['summaryInfo']
trnOutPd = [npCurrent.fp_array( a ) for a in sInfoPd['trnOutput']]
valOutPd = [npCurrent.fp_array( a ) for a in sInfoPd['valOutput']]
tunedDiscrPf = tunedDict['tunedDiscr'][2]
sInfoPf = tunedDiscrPf['summaryInfo']
trnOutPf = [npCurrent.fp_array( a ) for a in sInfoPf['trnOutput']]
valOutPf = [npCurrent.fp_array( a ) for a in sInfoPf['valOutput']]
# Recompute the Pf working point and dump its graphs to pf.root.
decisionTaking = decisionMaker( tunedDiscrPf['discriminator'] )
a = ROOT.TFile("pf.root","recreate")
a.cd()
decisionTaking( dCurator.references[2], CuratedSubset.trnData, neuron = 10, sort = 0, init = 0 )
s = CuratedSubset.fromdataset(Dataset.Test)
tstPointCorr = decisionTaking.getEffPoint( dCurator.references[2].name + '_Test' , subset = [s, s], makeCorr = True )
decisionTaking.saveGraphs()
a.Write()
del a
print tstPointCorr
print dCurator.crossValid.getTrnBoxIdxs( sort )
print dCurator.crossValid.getValBoxIdxs( sort )
# The printed differences should be ~zero if the recomputation matches the
# stored summary; ValueError (e.g. shape mismatch) is deliberately ignored.
try:
    print trnOutPf[0] - decisionTaking.sgnOut
    print trnOutPf[1] - decisionTaking.bkgOut
    print valOutPf[0] - decisionTaking._effOutput[0]
    print valOutPf[1] - decisionTaking._effOutput[1]
except ValueError: pass
keyboard()
# Repeat the cross-check for the SP network, dumping to sp.root.
b = ROOT.TFile("sp.root",'recreate')
b.cd()
decisionTaking = decisionMaker( tunedDiscrSP['discriminator'] )
decisionTaking( dCurator.references[0], CuratedSubset.trnData, neuron = 10, sort = 0, init = 0 )
s = CuratedSubset.fromdataset(Dataset.Test)
tstPointCorr = decisionTaking.getEffPoint( dCurator.references[0].name + '_Test' , subset = [s, s], makeCorr = True )
decisionTaking.saveGraphs()
b.Write()
print tstPointCorr
try:
    print trnOutSP[0] - decisionTaking.sgnOut
    print trnOutSP[1] - decisionTaking.bkgOut
    print valOutSP[0] - decisionTaking._effOutput[0]
    print valOutSP[1] - decisionTaking._effOutput[1]
except ValueError: pass
|
[
"kaducovas@gmail.com"
] |
kaducovas@gmail.com
|
00d990a843520de66f3ef9a02f2e9b4c7b4f3634
|
33f3179000f0275e0e3253671b518e98611128d9
|
/migrations/versions/063cfb907a5c_.py
|
1759edaa8a7b0ebacebb21d33594d5c72c5016a8
|
[] |
no_license
|
ss820938ss/pro_chatbot
|
e0ff85d700a5fe3d33b4028b1ea7a3050e4367cd
|
f23d88dcfeffc33dea48972819dc8643975a3dae
|
refs/heads/master
| 2023-07-26T20:54:31.653597
| 2021-09-10T03:47:59
| 2021-09-10T03:47:59
| 397,513,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
"""empty message
Revision ID: 063cfb907a5c
Revises:
Create Date: 2021-08-18 20:55:32.437776
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '063cfb907a5c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the member, menu, question and answer tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('member',
    sa.Column('no', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=200), nullable=False),
    sa.Column('email', sa.String(length=200), nullable=False),
    sa.Column('password', sa.String(length=200), nullable=False),
    sa.PrimaryKeyConstraint('no')
    )
    op.create_table('menu',
    sa.Column('menu_no', sa.Integer(), nullable=False),
    sa.Column('menu_name', sa.String(length=200), nullable=False),
    sa.Column('menu_price', sa.Integer(), nullable=False),
    sa.Column('menu_kate', sa.String(length=200), nullable=False),
    sa.PrimaryKeyConstraint('menu_no')
    )
    op.create_table('question',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('subject', sa.String(length=200), nullable=False),
    sa.Column('content', sa.Text(), nullable=False),
    sa.Column('create_date', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # answer references question and is removed with it (ON DELETE CASCADE).
    op.create_table('answer',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('question_id', sa.Integer(), nullable=True),
    sa.Column('content', sa.Text(), nullable=False),
    sa.Column('create_date', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['question_id'], ['question.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade(), FK-dependent tables first."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('answer')    # depends on question via foreign key
    op.drop_table('question')
    op.drop_table('menu')
    op.drop_table('member')
    # ### end Alembic commands ###
|
[
"ss820938ss@gmail.com"
] |
ss820938ss@gmail.com
|
1060c16c44b2ccd9172668d50fee71da836dbd5e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/6/usersdata/129/1287/submittedfiles/investimento.py
|
c5f1ee45d7f2ffb03013182917c20506cd4c63ca
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
# COMECE SEU CODIGO AQUI
# Read an initial investment and a growth rate, then print the balance
# after each of 10 periods of compound growth (two decimal places each).
# NOTE: Python 2 `input()` evaluates the typed expression, so numeric
# entry yields a number directly.
A = input('Digite o valor do investimento inicial: ')
B = input('Digite o valor de crescimento percentual: ')
saldo = A
# Replaces ten copy-pasted variables (C..L) with a single compounding loop;
# the printed output is identical.
for _ in range(10):
    saldo = saldo + saldo*B
    print ('%.2f' % saldo)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
6d5da17b1099b03b0b389db4251bab06d7722e51
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/lun/invalid_use_partner_cfmode_setting_info.py
|
d4c1dc34b9ec55d375e71885616f1a9920a60be8
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133
| 2015-01-14T17:40:46
| 2015-01-14T17:40:46
| 29,256,898
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
from netapp.netapp_object import NetAppObject
class InvalidUsePartnerCfmodeSettingInfo(NetAppObject):
    """
    Information about an invalid initiator group use_partner
    and cfmode
    """
    # Backing field for the initiator_group_name property.
    _initiator_group_name = None
    @property
    def initiator_group_name(self):
        """
        Name of this initiator group.
        """
        return self._initiator_group_name
    @initiator_group_name.setter
    def initiator_group_name(self, val):
        # Validate non-None values via the NetAppObject machinery before
        # storing; None is stored as-is (clears the field).
        if val != None:
            self.validate('initiator_group_name', val)
        self._initiator_group_name = val
    # Backing field for the is_use_partner_enabled property.
    _is_use_partner_enabled = None
    @property
    def is_use_partner_enabled(self):
        """
        If true this initiator group's members are
        allowed to use the partner port.
        """
        return self._is_use_partner_enabled
    @is_use_partner_enabled.setter
    def is_use_partner_enabled(self, val):
        if val != None:
            self.validate('is_use_partner_enabled', val)
        self._is_use_partner_enabled = val
    @staticmethod
    def get_api_name():
        # ONTAP API element name for this structure.
        return "invalid-use-partner-cfmode-setting-info"
    @staticmethod
    def get_desired_attrs():
        # API attribute names requested by default.
        return [
            'initiator-group-name',
            'is-use-partner-enabled',
        ]
    def describe_properties(self):
        # Property metadata consumed by the generated (de)serialization
        # layer.  NOTE(review): `basestring` makes this Python 2 only.
        return {
            'initiator_group_name': { 'class': basestring, 'is_list': False, 'required': 'required' },
            'is_use_partner_enabled': { 'class': bool, 'is_list': False, 'required': 'required' },
        }
|
[
"radek@gruchalski.com"
] |
radek@gruchalski.com
|
3962de788f504519a10a81bf39a059fafa67d08d
|
2aba3c043ce4ef934adce0f65bd589268ec443c5
|
/atcoder/CODE_FESTIVAL_2015_qual_B/B.py
|
9e6979aff253edb3737ba01ff0bf57c0c50a01b5
|
[] |
no_license
|
kambehmw/algorithm_python
|
4f66593b77039d90515d1fcbecacdab8c811b92f
|
17222399dcc92fd8f908e5774a9883e2e89c486e
|
refs/heads/master
| 2020-06-02T12:44:11.322356
| 2020-05-18T13:22:05
| 2020-05-18T13:22:05
| 191,157,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from collections import Counter
# Read N values and report the element that forms a strict majority,
# or "?" when no element occurs more than N // 2 times.
N, M = map(int, input().split())
A = list(map(int, input().split()))
# Most frequent element and its multiplicity.
value, count = Counter(A).most_common(1)[0]
print(value if count > N // 2 else "?")
|
[
"kanbe.hmw@gmail.com"
] |
kanbe.hmw@gmail.com
|
a74f698d0e534a081149edbe1311ec943dbcfd16
|
12a652ffb301e2f48cb80ddc8fdc556926b8026f
|
/scripts/python/anagrams/ngrams.py
|
f682d31a98056bbe05337294d7965b0e99c424ec
|
[
"MIT"
] |
permissive
|
jeremiahmarks/dangerzone
|
de396da0be6bcc56daf69f2f3093afed9db5ede3
|
fe2946b8463ed018d2136ca0eb178161ad370565
|
refs/heads/master
| 2020-05-22T12:45:34.212600
| 2017-04-18T02:57:59
| 2017-04-18T02:57:59
| 28,025,861
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
import string
import random
import grams
class anagramfinder(object):
    """Find dictionary words buildable from the letters of a given string.

    Letters are weighted by how often they appear across the candidate
    words (rare letters score high), and words reaching the maximum
    possible score are printed.  NOTE(review): Python 2 code (print
    statement) with a hard-coded word list at /home/jlmarks/words.txt.
    """
    def checkLetters(self,mainstring, teststring):
        # True iff every letter of teststring occurs in mainstring at
        # least as many times as in teststring.
        #print "checkLetters is checking "+mainstring + " against "+teststring
        isThere=True
        for letter in teststring:
            if (teststring.count(letter)>mainstring.count(letter)):return False
        return isThere
    def getWordList(self):
        # Load the word list and keep only words buildable from self.astring.
        words=set()
        wordFile=open('/home/jlmarks/words.txt','r')
        for word in wordFile:
            words.add(word[:-1])   # strip the trailing newline
        wordFile.close()
        self.entiredictionary=words
        stringasset=set(self.astring)
        for word in words:
            # Subset check is a cheap pre-filter before the per-letter count.
            if (set(word).issubset(stringasset)&self.checkLetters(self.astring,word)):
                self.allwords.append(word)
    def __init__ (self, astring):
        self.allwords=[]
        self.points=[]
        self.wordpoints=[]
        self.astring=astring
        self.getWordList()
        self.auditwordlist()
        self.computeWordScore()
    def auditwordlist(self):
        # Count, for each distinct input letter, how many candidate words
        # use it; then assign each letter the usage count of its "mirror"
        # letter in the sorted list, so the rarest letters get the highest
        # point values.
        for letter in set(self.astring):
            wordsusedin=0
            for word in self.allwords:
                if letter in set(word):
                    wordsusedin+=1
            self.points.append([wordsusedin,letter])
        self.points.sort()
        tempdict={}
        for position in range(len(self.points)):
            #print [self.points[len(self.points)-position][0], self.points[position][1]]
            tempdict[self.points[position][1]]= self.points[len(self.points)-1-position][0]
        self.points=tempdict
    def computeWordScore(self):
        # Score every candidate word and print those reaching the maximum
        # possible score (the score of the full input string).
        maxscore=0
        for letter in self.astring:
            maxscore=maxscore+self.points[letter]
        for word in self.allwords:
            score=0
            for letter in word:
                score=score+self.points[letter]
            self.wordpoints.append([score,word])
            if score>=maxscore:
                print word
|
[
"Jeremiah@JLMarks.org"
] |
Jeremiah@JLMarks.org
|
2b65d1e32c6aedc8f97411e58999eaa62a8b13ac
|
e4cab6feadcee618f092f23020a157c8ded42ffc
|
/WEB/2. Bots/FirstBot/tools/keyboards.py
|
f7b619b6937bbdf92a8ada14d54037cd34d60240
|
[] |
no_license
|
Larionov0/Group3_Lessons
|
7c314898a70c61aa445db37383076e211692b56b
|
628bc7efe6817d107cb39d3017cb7cee44b86ba4
|
refs/heads/master
| 2023-08-22T07:14:44.595963
| 2021-10-17T11:48:06
| 2021-10-17T11:48:06
| 339,141,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
import json
def make_keyboard(list_keyboard):
    """Serialize a nested list of button labels to a reply-keyboard JSON string.

    Example::

        make_keyboard([["Play", "Shop"], ["Help"]])
        -> '{"keyboard": [[{"text": "Play"}, {"text": "Shop"}],
                          [{"text": "Help"}]]}'

    Each label becomes a ``{"text": label}`` button; rows are preserved.
    Non-ASCII labels are emitted verbatim (ensure_ascii=False).
    """
    rows = [[{'text': label} for label in row] for row in list_keyboard]
    return json.dumps({'keyboard': rows}, ensure_ascii=False)
if __name__ == '__main__':
print(make_keyboard([
["Грати", "Магазин"],
["Моя корзина"],
["Друзі", "Підтримка"]
]))
|
[
"larionov1001@gmail.com"
] |
larionov1001@gmail.com
|
e13f85a697789a6add9a026b87968ab8ad0c62aa
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02646/s315626964.py
|
e9854e7e7c5008b40ed6f4acd03e5ae61be041f9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
# Pursuit problem: a chaser at position A with speed V tries to reach a
# runner at position B with speed W within T seconds.
A, V = map(int, input().split())
B, W = map(int, input().split())
T = int(input())
gap = abs(B - A)  # initial distance between chaser and runner
# The chaser succeeds iff it is strictly faster and closes the gap in time
# (equivalent to the original swap-then-compare formulation).
if V > W and (V - W) * T >= gap:
    print("YES")
else:
    print("NO")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
676c910681338a412c9976012360ab390235b2e3
|
52585c8d95cef15199c18ba1a76899d2c31329f0
|
/05PythonCookbook/ch12Concurrency/14launching_a_daemon_process_on_unix/daemon.py
|
b3b2215ad1ffb5424f87257ca06e09df1f0e1482
|
[] |
no_license
|
greatabel/PythonRepository
|
c7a952257303a21083ed7d535274c339362bd126
|
836fcdd3f5c1b150122302685104fe51b5ebe1a3
|
refs/heads/master
| 2023-08-30T15:56:05.376391
| 2023-08-26T03:34:14
| 2023-08-26T03:34:14
| 29,392,599
| 33
| 6
| null | 2023-02-14T13:33:21
| 2015-01-17T13:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,928
|
py
|
# $python3 daemon.py start
# $cat /tmp/daemon.pid
# 34358
# $tail -f /tmp/daemon.log
# Daemon started with pid 34358
# Daemon Alive! Sat Jan 9 17:30:39 2016
# Daemon Alive! Sat Jan 9 17:30:49 2016
# Daemon Alive! Sat Jan 9 17:30:59 2016
# Daemon Alive! Sat Jan 9 17:31:09 2016
# Daemon Alive! Sat Jan 9 17:31:19 2016
# ^C
# $python3 daemon.py stop
#!/usr/bin/env python3
# daemon.py
import os
import sys
import atexit
import signal
def daemonize(pidfile, *, stdin='/dev/null',
              stdout='/dev/null',
              stderr='/dev/null'):
    """Detach the current process and turn it into a daemon.

    Performs the classic UNIX double-fork, detaches from the controlling
    terminal, redirects the standard streams to the given files, writes the
    daemon's pid to *pidfile*, and installs SIGTERM/atexit cleanup that
    removes the pid file.

    Raises RuntimeError if *pidfile* already exists or either fork fails.
    """
    if os.path.exists(pidfile):
        raise RuntimeError('Already running')

    # First fork (detaches from parent)
    try:
        if os.fork() > 0:
            raise SystemExit(0)  # Parent exit
    except OSError as e:
        raise RuntimeError('fork #1 failed.')

    # Become session leader with no controlling terminal, reset cwd/umask
    # so the daemon does not pin a mount point or inherit permissions.
    os.chdir('/')
    os.umask(0)
    os.setsid()
    # Second fork (relinquish session leadership)
    try:
        if os.fork() > 0:
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError('fork #2 failed.')

    # Flush I/O buffers
    sys.stdout.flush()
    sys.stderr.flush()

    # Replace file descriptors for stdin, stdout, and stderr
    # (unbuffered; stdout/stderr open in append mode).
    with open(stdin, 'rb', 0) as f:
        os.dup2(f.fileno(), sys.stdin.fileno())
    with open(stdout, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stdout.fileno())
    with open(stderr, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stderr.fileno())

    # Write the PID file
    with open(pidfile, 'w') as f:
        print(os.getpid(), file=f)

    # Arrange to have the PID file removed on exit/signal
    atexit.register(lambda: os.remove(pidfile))

    # Signal handler for termination (required so atexit handlers run)
    def sigterm_handler(signo, frame):
        raise SystemExit(1)

    signal.signal(signal.SIGTERM, sigterm_handler)
def main():
    """Daemon body: announce the pid, then emit a heartbeat line every ten
    seconds, forever (output goes to the redirected stdout log)."""
    import time
    print('Daemon started with pid {}'.format(os.getpid()))
    while True:
        print('Daemon Alive! {}'.format(time.ctime()))
        time.sleep(10)
if __name__ == '__main__':
    PIDFILE = '/tmp/daemon.pid'

    if len(sys.argv) != 2:
        print('Usage: {} [start|stop]'.format(sys.argv[0]), file=sys.stderr)
        raise SystemExit(1)

    if sys.argv[1] == 'start':
        try:
            # Fix: the original passed stderr='/tmp/dameon.log' (typo),
            # silently sending stderr to a different file than stdout.
            daemonize(PIDFILE,
                      stdout='/tmp/daemon.log',
                      stderr='/tmp/daemon.log')
        except RuntimeError as e:
            print(e, file=sys.stderr)
            raise SystemExit(1)

        main()

    elif sys.argv[1] == 'stop':
        # Deliver SIGTERM to the pid recorded at startup; the daemon's
        # handler converts it into a clean SystemExit.
        if os.path.exists(PIDFILE):
            with open(PIDFILE) as f:
                os.kill(int(f.read()), signal.SIGTERM)
        else:
            print('Not running', file=sys.stderr)
            raise SystemExit(1)

    else:
        print('Unknown command {!r}'.format(sys.argv[1]), file=sys.stderr)
        raise SystemExit(1)
|
[
"greatabel1@126.com"
] |
greatabel1@126.com
|
6dfd0a8cae771b4bd31112c19577568d72aa67ab
|
04afe39888ea2a3a131a7096dc4eea08e556aea3
|
/upgrade/models/check-output.py
|
d0b7eb2c12b36ea54d1cca225ee0d0552a1fdd91
|
[] |
no_license
|
vinzenz/actor-stdlib-examples
|
2b0b65b8e6ae2e68a058fbe83e6daecde7c6f8c1
|
35108d2a83703a367a6ac807e651bb04730e5bfd
|
refs/heads/master
| 2021-04-29T02:04:10.185880
| 2018-03-28T09:57:46
| 2018-03-28T09:57:46
| 121,812,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
from leapp.models import Model, fields
from leapp.topics import CheckOutputTopic
class CheckOutput(Model):
    """Leapp message model carrying the outcome of one check.

    Published on CheckOutputTopic; consumers receive the producing actor's
    name, an optional action identifier, a status, a human-readable summary
    and a list of string parameters.
    """
    topic = CheckOutputTopic
    # Required: name of the check actor that produced this result.
    check_actor = fields.String(required=True)
    # Optional: presumably narrows the result to one action within the
    # actor — confirm against the emitters.
    check_action = fields.String()
    status = fields.String(required=True)
    summary = fields.String(required=True)
    params = fields.List(fields.String(), required=True)
|
[
"vfeenstr@redhat.com"
] |
vfeenstr@redhat.com
|
df9999f0bc1ec9301db6c6b588572cdef393fb3d
|
24946a607d5f6425f07d6def4968659c627e5324
|
/Algorithms/staircase.py
|
7301debb6ce9b06238ee64987170966a5d2d29f6
|
[] |
no_license
|
mmrubayet/HackerRank_solutions
|
5d8acbb8fd6f305a006f147e6cb76dbfc71bbca5
|
f1c72fbf730b6a79656d578f6c40a128a6f0ac5c
|
refs/heads/master
| 2023-06-02T16:51:18.017902
| 2021-06-19T18:35:41
| 2021-06-19T18:35:41
| 233,853,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the staircase function below.
def staircase(n):
    """Print a right-aligned staircase of '#' characters with n steps."""
    for step in range(1, n + 1):
        print(' ' * (n - step) + '#' * step)
if __name__ == '__main__':
    # Read the staircase height from stdin and draw it.
    n = int(input())
    staircase(n)
|
[
"m.rubayet94@gmail.com"
] |
m.rubayet94@gmail.com
|
4c45b9f0a45f25ef256430aba68a2a6cd0205869
|
07f92805a75dc91b8be2ac14c238394245eda9ea
|
/Python编程从入门到实践/ch10/write_message.py
|
f63d0f76f2edfe7dd25ab4d01381877ac1545533
|
[] |
no_license
|
08zhangyi/Some-thing-interesting-for-me
|
6ea7366ef1f0812397300259b2e9d0e7217bcba0
|
f4cbda341ada98753c57a3ba07653163522dd023
|
refs/heads/master
| 2023-01-11T22:54:03.396911
| 2023-01-06T05:47:41
| 2023-01-06T05:47:41
| 136,426,995
| 7
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
filename = 'programming.txt'

# Create (or truncate) the file with the first two lines.
with open(filename, 'w') as handle:
    handle.writelines([
        "I love programming.\n",
        "I love creating new games.\n",
    ])

# Reopen in append mode and add two more lines.
with open(filename, 'a') as handle:
    handle.writelines([
        "I also love finding meaning in large datasets.\n",
        "I love creating apps that can run in a browser.\n",
    ])
|
[
"395871987@qq.com"
] |
395871987@qq.com
|
50a408b8dfc41fdb59dde8b5a78466973222fb85
|
e65ce709feadc277b95032be5269d450deab76fc
|
/ark/account/views.py
|
12c0f4a241d6385ea451e1189fed69b176de86be
|
[
"MIT"
] |
permissive
|
N402/NoahsArk
|
77c6b8d8ddfdf76688575cc9d1f65ba432b6286a
|
97dbe295d0579912860c7b7a4509a3912d5a783b
|
refs/heads/master
| 2016-09-10T20:38:48.101055
| 2015-08-18T02:50:00
| 2015-08-18T02:50:00
| 28,763,317
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,322
|
py
|
from flask import Blueprint, render_template
from flask import url_for, redirect, request, jsonify
from flask.ext.login import current_user, login_required
from flask.ext.babel import gettext
from sqlalchemy import or_
from ark.exts import db
from ark.utils.qiniu import get_url
from ark.utils.helper import jsonify_lazy
from ark.account.forms import (
SignUpForm, SignInForm, ChangePassword, AvatarForm, ProfileForm)
from ark.goal.services import get_charsing_goals, get_completed_goals
from ark.account.models import Account
from ark.goal.models import Goal, GoalActivity
from ark.goal.forms import GoalActivityForm, CreateGoalForm
from ark.account.services import (signin_user, signout_user,
signup_user, add_signin_score)
from ark.notification.models import Notification
# Blueprint grouping all account-related endpoints; registered by the app.
account_app = Blueprint('account', __name__)
@account_app.route('/account/signin', methods=('POST',))
def signin():
    """Authenticate a user from the sign-in form (POST only).

    Already-authenticated users are redirected to their goals page.  On
    success the user gains a sign-in score and a session (optionally
    remembered) and ``{"success": true}`` is returned; failures come back
    as JSON error payloads.
    """
    if not current_user.is_anonymous():
        return redirect(url_for('goal.goals', uid=current_user.id))

    form = SignInForm(request.form)
    if form.validate_on_submit():
        email = form.data['signin_email'].strip()
        password = form.data['signin_password'].strip()
        # "Remember me" defaults to on when the checkbox field is absent.
        is_remember_me = form.data.get('remember_me', 'y') == 'y'
        user = Account.query.authenticate(email, password)
        if user:
            add_signin_score(user)
            signin_user(user, remember=is_remember_me)
            return jsonify(success=True)
        else:
            #TODO: refactor
            # NOTE: `unicode` implies this module targets Python 2.
            return jsonify_lazy(
                success=False,
                messages={
                    'signin_email': [
                        unicode(gettext(u'email or password is wrong'))]})
    if form.errors:
        return jsonify_lazy(success=False,
                            status="errors",
                            messages=form.errors)
    return render_template('account/signin.html', form=form)
@account_app.route('/account/signup', methods=('POST',))
def signup():
    """Register a new account from the sign-up form (POST only).

    Already-authenticated users are redirected to their goals page.  On a
    valid form the account is created, committed, signed in with a
    persistent session, and ``{"success": true}`` is returned; validation
    failures come back as a JSON error payload.
    """
    if not current_user.is_anonymous():
        return redirect(url_for('goal.goals', uid=current_user.id))

    form = SignUpForm(request.form)
    if form.validate_on_submit():
        email = form.data['email'].strip()
        username = form.data['username'].strip()
        password = form.data['password'].strip()
        user = Account(
            email=email,
            username=username,
            password=password,
        )
        db.session.add(user)
        # signup_user runs before the commit, in the same session.
        signup_user(user)
        db.session.commit()
        signin_user(user, remember=True)
        return jsonify(success=True)
    if form.errors:
        return jsonify_lazy(success=False,
                            status="errors",
                            messages=form.errors)
    return render_template('account/signup.html', form=form)
@account_app.route('/account/signout')
@login_required
def signout():
    """Log the current user out, then redirect to ?next= or the front page."""
    target = request.args.get('next') or url_for('master.index')
    signout_user(current_user)
    return redirect(target)
@account_app.route('/account/profile', methods=('PUT',))
@login_required
def profile():
    """Update the current user's profile fields (PUT).

    The email is write-once: it is only stored while still None.  Returns
    JSON success or JSON errors; note the implicit None return when the
    form is invalid yet ``form.errors`` is empty.
    """
    form = ProfileForm(request.form)
    if form.validate_on_submit():
        # Email may be filled in once and is never overwritten afterwards.
        if current_user.email is None and form.data['email']:
            current_user.email = form.data['email']
        current_user.username = form.data['username']
        current_user.whatsup = form.data['whatsup']
        db.session.add(current_user)
        db.session.commit()
        return jsonify(success=True)
    if form.errors:
        return jsonify_lazy(success=False, messages=form.errors)
@account_app.route('/account/avatar', methods=['GET', 'POST'])
@login_required
def avatar():
    """Render the avatar form (GET) or store a new avatar (valid POST).

    The posted value is resolved to a full URL via ``get_url`` (the qiniu
    storage helper) before being saved; a JSON payload carrying the stored
    URL is returned on success.
    """
    form = AvatarForm(request.form)
    if form.validate_on_submit():
        current_user.avatar_url = get_url(form.data['avatar_url'])
        db.session.add(current_user)
        db.session.commit()
        return jsonify(success=True, url=current_user.avatar_url)
    if form.errors:
        return jsonify(success=False, messages=form.errors)
    return render_template('account/avatar.html', form=form)
@account_app.route('/account/profile/password', methods=('PUT',))
@login_required
def password():
    """Change the current user's password (PUT).

    Returns JSON success on change, JSON errors on validation failure, and
    implicitly None when the form is invalid with no recorded errors.
    """
    form = ChangePassword(request.form)
    if form.validate_on_submit():
        current_user.change_password(form.data['new_password'])
        db.session.add(current_user)
        db.session.commit()
        return jsonify(success=True)
    if form.errors:
        return jsonify_lazy(success=False, messages=form.errors)
@account_app.route('/account/messages')
@login_required
def messages():
    """Render the user's notifications, 10 per page (?page=N, 1-based).

    Shows notifications addressed to the user or broadcast to everyone,
    restricted to those created after the account itself, newest first.
    """
    page = int(request.args.get('page', 1))
    pagination = (Notification.query
                  .filter(or_(Notification.receivers.any(
                      Account.id == current_user.id),
                      Notification.send_to_all == True))
                  .filter(Notification.created >= current_user.created)
                  .order_by(Notification.created.desc())
                  .paginate(page, 10))
    return render_template(
        'account/messages.html', page=page, pagination=pagination)
@account_app.route('/account/messages/mark_read', methods=('PUT',))
@login_required
def mark_read():
    """Delegate to Account.mark_read() — presumably flags the user's
    notifications as read (confirm against the model) — and report success."""
    current_user.mark_read()
    return jsonify(success=True)
|
[
"shonenada@gmail.com"
] |
shonenada@gmail.com
|
974eba86119b66cbc84ecb49bef7bbc009673c99
|
777a972966fa29a1b5a1a0c5d507a3137de007fc
|
/container_management/models/container_config.py
|
2f7e0aa4f60a6260eda2b5e20d1f9c9438efcb90
|
[] |
no_license
|
suningwz/ruvati
|
1d1ace30fb2929f686f368fb8d8c51ae76a71190
|
9b15373125139cab1d26294c218685c5b87b9709
|
refs/heads/master
| 2023-08-15T22:28:18.499733
| 2021-10-12T12:16:56
| 2021-10-12T12:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields,api
class ContainerConfig(models.TransientModel):
    """Settings wizard exposing container-accounting parameters.

    Inherits ``res.config.settings``; each field carries a
    ``config_parameter`` key, so values are persisted as system parameters
    rather than on the transient record itself.
    """
    _name = 'container.config'
    _description = "Container Configuration"
    _inherit = 'res.config.settings'

    # Account used for freight costs on container moves.
    freight_account_id = fields.Many2one('account.account', string="Freight Account", config_parameter='freight_account_id', default='')
    # Account used for customs clearance costs ("Clearence" sic, kept as-is).
    customs_clearence_account_id = fields.Many2one('account.account', config_parameter='customs_clearence_account_id', string="Customs Clearence Account")
|
[
"vinod@confianzit.biz"
] |
vinod@confianzit.biz
|
5e3c9bef5cc205d514052ea4a0687ae7ab1cd5a9
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_solariums.py
|
9e32ce0f592b4095252b978a893f5e61bf9a7f98
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
#calss header
class _SOLARIUMS():
def __init__(self,):
self.name = "SOLARIUMS"
self.definitions = solarium
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['solarium']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c1278210180978e06fd80b879ace3fb24f9d1b19
|
a2ba4451a2b0264cd5e65f393e370bc6c809bff0
|
/src/test/helpers/PDUnitTest.py
|
a4fea56ba5d68c096d3b71e9675adef2f8d15d19
|
[] |
no_license
|
Sinnach0/PDOauth
|
e1579a6a1047f2b770881acfa70bcc6860cd3a89
|
a49a267f5564a40c1b24cab0232b2ead1407e344
|
refs/heads/master
| 2020-12-26T01:59:31.985745
| 2015-06-09T09:52:09
| 2015-06-09T09:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
from twatson.unittest_annotations import Fixture, test # @UnusedImport
from pdoauth.Controller import Controller, Interfaced
from pdoauth.FlaskInterface import FlaskInterface
from test.helpers.FakeInterFace import FakeInterface, FakeMailer, TestData
from pdoauth.ReportedError import ReportedError
class PDUnitTest(Fixture):
    """Base test fixture that swaps the real FlaskInterface for
    FakeInterface (and the controller's mailer for FakeMailer) around each
    test, restoring both afterwards."""

    def setUp(self):
        self.setUpController()

    def tearDown(self):
        self.tearDownController()

    def setUpController(self):
        # Install the fake interface before obtaining the Controller
        # singleton, then stash and replace its mailer.
        Interfaced.unsetInterface(FlaskInterface)
        Interfaced.setInterface(FakeInterface)
        FakeInterface._testdata = TestData()
        self.controller = Controller.getInstance()
        self.oldmail = getattr(self.controller, "mail", None)
        self.controller.mail = FakeMailer()

    def tearDownController(self):
        # Undo setUpController: real interface back, original mailer back.
        Interfaced.unsetInterface(FakeInterface)
        Interfaced.setInterface(FlaskInterface)
        self.controller.mail = self.oldmail

    def assertReportedError(self, funct, args, status, descriptor):
        """Assert funct(*args) raises ReportedError with the given status
        and descriptor."""
        with self.assertRaises(ReportedError) as e:
            funct(*args)
        self.assertEquals(e.exception.status, status)
        self.assertEqual(descriptor, e.exception.descriptor)
|
[
"mag@magwas.rulez.org"
] |
mag@magwas.rulez.org
|
65190f63674ba42b90a697afc3c471684de05db7
|
2c7f025568bceb560888d26828aef30e5ae23393
|
/bin/player.py
|
6f88955e7794fa49bd22cd7439f71727990972c0
|
[] |
no_license
|
GustavoCruz12/educacao
|
6271ebc71830ee1964f8311d3ef21ec8abf58e50
|
d0faa633ed1d588d84c74a3e15ccf5fa4dd9839e
|
refs/heads/master
| 2022-12-08T09:34:42.066372
| 2018-08-03T06:38:49
| 2018-08-03T06:38:49
| 143,387,426
| 0
| 0
| null | 2022-12-08T00:01:52
| 2018-08-03T06:31:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
#!/home/gustavo/Projetos/web_serrana/bin/python3
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
    """A Label widget that plays an animated image by repainting itself on
    a timer: either a list of PIL images or a single multi-frame image
    (advanced with seek/tell)."""

    def __init__(self, master, im):
        if isinstance(im, list):
            # list of images
            # NOTE(review): frame 0 is dropped here (im[1:]) and im[1]
            # becomes the first displayed frame — looks deliberate given
            # paste() replaces the widget image, but confirm.
            self.im = im[1:]
            im = self.im[0]
        else:
            # sequence
            self.im = im

        if im.mode == "1":
            # 1-bit images need a bitmap with an explicit foreground colour.
            self.image = ImageTk.BitmapImage(im, foreground="white")
        else:
            self.image = ImageTk.PhotoImage(im)

        tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
        self.update()

        # Per-frame delay in milliseconds; default 100 when the file has none.
        duration = im.info.get("duration", 100)
        self.after(duration, self.next)

    def next(self):
        """Advance to the next frame and reschedule until frames run out."""
        if isinstance(self.im, list):
            try:
                im = self.im[0]
                del self.im[0]
                self.image.paste(im)
            except IndexError:
                return  # end of list
        else:
            try:
                im = self.im
                im.seek(im.tell() + 1)
                self.image.paste(im)
            except EOFError:
                return  # end of file

        duration = im.info.get("duration", 100)
        self.after(duration, self.next)

        self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
[
"gustavocruz201419@gmail.com"
] |
gustavocruz201419@gmail.com
|
90c8650602b1b7604791e96b1119c15da29e8c78
|
38382e23bf57eab86a4114b1c1096d0fc554f255
|
/hazelcast/protocol/codec/list_is_empty_codec.py
|
3c42ede40f79caab5b53e877250ec48b64b4f700
|
[
"Apache-2.0"
] |
permissive
|
carbonblack/hazelcast-python-client
|
e303c98dc724233376ab54270832bfd916426cea
|
b39bfaad138478e9a25c8a07f56626d542854d0c
|
refs/heads/gevent-3.12.3.1
| 2023-04-13T09:43:30.626269
| 2020-09-18T17:37:17
| 2020-09-18T17:37:17
| 110,181,474
| 3
| 1
|
Apache-2.0
| 2020-12-01T17:45:42
| 2017-11-10T00:21:55
|
Python
|
UTF-8
|
Python
| false
| false
| 940
|
py
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.codec.list_message_type import *
# Codec constants: the LIST_ISEMPTY request message type, the frame type
# expected in the reply, and whether the operation is safe to retry.
REQUEST_TYPE = LIST_ISEMPTY
RESPONSE_TYPE = 101
RETRYABLE = True
def calculate_size(name):
    """ Calculates the request payload size"""
    # The payload carries only the list name as an encoded string.
    return calculate_size_str(name)
def encode_request(name):
    """ Encode request into client_message"""
    # Allocate a frame sized for the payload, then fill in type/flags/body.
    client_message = ClientMessage(payload_size=calculate_size(name))
    client_message.set_message_type(REQUEST_TYPE)
    client_message.set_retryable(RETRYABLE)
    client_message.append_str(name)
    # Finalize the frame-length header field after writing the payload.
    client_message.update_frame_length()
    return client_message
def decode_response(client_message, to_object=None):
    """ Decode response from client message"""
    # The reply carries a single boolean ("is the list empty?").
    return {'response': client_message.read_bool()}
|
[
"arslanasim@gmail.com"
] |
arslanasim@gmail.com
|
316d6a4481b9b85c7182965171aa599101a81e14
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/1605.0_Find_Valid_Matrix_Given_Row_and_Column_Sums.py
|
b4fada7996fc9ea5767ddf55419bcfb31f7c78f9
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
'''
Greedy
T: O(M+N)
S: O(1)
执行用时:64 ms, 在所有 Python3 提交中击败了92.42% 的用户
内存消耗:19.6 MB, 在所有 Python3 提交中击败了48.48% 的用户
通过测试用例:84 / 84
'''
class Solution:
    def restoreMatrix(self, rowSum: List[int], colSum: List[int]) -> List[List[int]]:
        """Build any non-negative matrix with the given row and column sums.

        Greedy: place as much as the current row/column still allows, then
        step past whichever constraint was exhausted.  Both input lists are
        mutated in the process.  O(m + n) cells are visited.
        """
        rows, cols = len(rowSum), len(colSum)
        grid = [[0] * cols for _ in range(rows)]
        r = c = 0
        while r < rows and c < cols:
            need_row, need_col = rowSum[r], colSum[c]
            if need_row >= need_col:
                grid[r][c] = need_col
                rowSum[r] = need_row - need_col
                c += 1
            else:
                grid[r][c] = need_row
                colSum[c] = need_col - need_row
                r += 1
        return grid
|
[
"laoxing201314@outlook.com"
] |
laoxing201314@outlook.com
|
9b9e97ebbd98b98e9738d5df74392cb0d6e2d21c
|
aa49120740b051eed9b7199340b371a9831c3050
|
/clone.py
|
701834c5ca32640f8f8f368b3578b0ea5477daa0
|
[] |
no_license
|
ashutosh-narkar/LeetCode
|
cd8d75389e1ab730b34ecd860b317b331b1dfa97
|
b62862b90886f85c33271b881ac1365871731dcc
|
refs/heads/master
| 2021-05-07T08:37:42.536436
| 2017-11-22T05:18:23
| 2017-11-22T05:18:23
| 109,366,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,822
|
py
|
#!/usr/bin/env python
'''
Clone an undirected graph. Each node in the graph contains a label and a list of its neighbors.
From the way the Node class is defined, this approach should work for directed graphs as well
'''
# Definition for a undirected graph node
class UndirectedGraphNode:
    """A node in an undirected graph: a label plus its adjacent nodes."""

    def __init__(self, x):
        self.label = x
        self.neighbors = []
from collections import deque
# Using BFS
def cloneGraph(node):
    """Return a deep copy of the graph reachable from *node* (BFS).

    The clone map doubles as the visited set, so each label is cloned once;
    neighbor lists keep their original order.  Returns None for no node.
    """
    if not node:
        return

    # label -> cloned node; seeded with the start node's clone
    clones = {node.label: UndirectedGraphNode(node.label)}
    pending = deque([node])

    while pending:
        original = pending.popleft()
        for neighbor in original.neighbors:
            if neighbor.label not in clones:
                clones[neighbor.label] = UndirectedGraphNode(neighbor.label)
                pending.append(neighbor)
            # Wire the cloned edge mirroring the original adjacency.
            clones[original.label].neighbors.append(clones[neighbor.label])

    return clones[node.label]
###############################################
# Using DFS
def cloneGraph(node):
    """Return a deep copy of the graph reachable from *node* (DFS entry)."""
    if not node:
        return None
    return dfs(node, {})
def dfs(oldNode, nodeMap):
    """Clone *oldNode*, record it in *nodeMap*, then recursively clone any
    neighbor not yet seen, wiring the cloned adjacency as it unwinds."""
    copy = UndirectedGraphNode(oldNode.label)
    nodeMap[copy.label] = copy
    for neighbor in oldNode.neighbors:
        if neighbor.label not in nodeMap:
            dfs(neighbor, nodeMap)
        # update the neighbours of the cloned node
        copy.neighbors.append(nodeMap[neighbor.label])
    return copy
|
[
"ashutosh.narkar@one.verizon.com"
] |
ashutosh.narkar@one.verizon.com
|
be09094636d2801e89169504b074ee1057aed514
|
74649c1220c68ad0af79e420d572e3769fcd7a53
|
/mlprodict/onnx_tools/optim/onnx_optimisation_identity.py
|
c96948edb77f4f8c561404cd8813ec7f9e918e5f
|
[
"MIT"
] |
permissive
|
sdpython/mlprodict
|
e62edcb428700cb2c4527e54e96431c1d2b36118
|
27d6da4ecdd76e18292f265fde61d19b66937a5c
|
refs/heads/master
| 2023-05-08T10:44:30.418658
| 2023-03-08T22:48:56
| 2023-03-08T22:48:56
| 112,469,804
| 60
| 13
|
MIT
| 2023-04-19T01:21:38
| 2017-11-29T11:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,112
|
py
|
"""
@file
@brief Optimisation of :epkg:`ONNX` graphs.
"""
import logging
from onnx import FunctionProto, AttributeProto
from onnx.helper import make_graph, make_function
from ._onnx_optimisation_common import ( # pylint: disable=E0611
_rename_node_input,
_rename_node_output,
_apply_optimisation_on_graph,
_apply_remove_node_fct_node)
logger = logging.getLogger('onnx:optim')
def onnx_remove_node_identity(onnx_model, recursive=True, debug_info=None, **options):
    """
    Removes as many *Identity* nodes as possible.

    The function looks into every node and subgraphs if
    *recursive* is True for identity node. Unless such a
    node directy connects one input to one output, it will
    be removed and every other node gets its inputs or
    outputs accordingly renamed.

    :param onnx_model: onnx model
    :param recursive: looks into subgraphs
    :param debug_info: debug information (private)
    :param options: additional options (unused)
    :return: new onnx _model
    """
    if debug_info is None:
        debug_info = [str(type(onnx_model)).rsplit(
            '.', maxsplit=1)[-1].strip("'>")]
    else:
        debug_info = (debug_info +
                      [str(type(onnx_model)).rsplit('.', maxsplit=1)[-1].strip("'>")])

    if hasattr(onnx_model, 'graph'):
        # A ModelProto was given: recurse into its main graph.
        return _apply_optimisation_on_graph(
            onnx_remove_node_identity, onnx_model,
            recursive=recursive, debug_info=debug_info, **options)

    graph = onnx_model
    logger.debug("onnx_remove_node_identity:begin with %d nodes.",
                 len(graph.node))
    is_function = isinstance(graph, FunctionProto)

    if is_function:
        inputs = set(graph.input)
        outputs = set(graph.output)
        # Fix: `inputs_inits` was only assigned in the GraphProto branch,
        # so the FunctionProto path raised NameError when it was read
        # below.  Functions have no initializers, hence inputs alone.
        inputs_inits = inputs
    else:
        inputs = set(i.name for i in graph.input)
        inits = set(i.name for i in graph.initializer)
        inputs_inits = inputs.union(inits)
        outputs = set(o.name for o in graph.output)

    def retrieve_idnodes(graph, existing_nodes):
        # Collect (index, node, input name, output name) for every Identity
        # node still present; removed slots in existing_nodes are None.
        idnodes = []
        for i, exnode in enumerate(existing_nodes):
            if exnode is None:
                continue
            if exnode.op_type == 'Identity':
                input = exnode.input[0]
                output = exnode.output[0]
                idnodes.append((i, exnode, input, output))
        return idnodes

    # add to output the list of local variables in subgraphs
    def append_local_variable(graph, known=None, subgraph=True):
        if known is None:
            known = set()
        else:
            known = known.copy()
        local_var = set()
        if isinstance(graph, FunctionProto):
            known = set(graph.input)
        else:
            known = set(i.name for i in graph.input)
            known |= set(i.name for i in graph.initializer)
        for node in graph.node:
            for i in node.input:
                # Any input not produced locally is captured from an
                # enclosing scope — it must be preserved like an output.
                if i not in known and subgraph:
                    local_var.add(i)
            for o in node.output:
                known.add(o)
            for att in node.attribute:
                if (att.type == AttributeProto.GRAPH and  # pylint: disable=E1101
                        hasattr(att, 'g') and att.g is not None):
                    lv = append_local_variable(att.g, known)
                    local_var |= lv
        return local_var

    local_vars = append_local_variable(graph, subgraph=False)
    logger.debug('onnx_remove_node_identity:local_vars:%r', local_vars)
    # Names that must not be renamed away: graph outputs plus names
    # captured by subgraphs.
    ext_outputs = outputs | local_vars

    nodes = list(graph.node)
    rem = 1
    # Fixed point: keep sweeping while the previous pass removed/renamed
    # anything (`rem` counts modifications per pass).
    while rem > 0:
        rem = 0
        idnodes = retrieve_idnodes(graph, nodes)
        restart = False
        for i, _, inp, out in idnodes:
            if restart:
                break  # pragma: no cover
            if nodes[i] is None:
                # Already removed.
                continue  # pragma: no cover
            if inp in inputs_inits and out in ext_outputs:
                # Cannot be removed: it directly bridges an input to an output.
                continue
            if not restart and out not in ext_outputs:
                # We cannot change an output name.
                # Forward the Identity's input name to every consumer of its
                # output, then drop the node.
                for j in range(len(nodes)):  # pylint: disable=C0200
                    if nodes[j] is None:
                        continue
                    if out in nodes[j].input:
                        logger.debug('onnx_remove_node_identity:'
                                     '_rename_node_input:%s:%r->%r:'
                                     'out=%r:inp=%r',
                                     nodes[j].op_type, nodes[j].input,
                                     nodes[j].output, out, inp)
                        nodes[j] = _rename_node_input(nodes[j], out, inp)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True  # pragma: no cover
                logger.debug('onnx_remove_node_identity:1:remove:%s:%r->%r:',
                             nodes[i].op_type, nodes[i].input, nodes[i].output)
                nodes[i] = None
                rem += 1
                continue
            if not restart and inp not in inputs_inits and inp not in ext_outputs:
                # We cannot change an input name or an output name.
                # Rename the producer's output (and other consumers' inputs)
                # to the Identity's output name, then drop the node.
                for j in range(len(nodes)):  # pylint: disable=C0200
                    if nodes[j] is None:
                        continue
                    if inp in nodes[j].output:
                        logger.debug('onnx_remove_node_identity:'
                                     '_rename_node_output:%s:%r->%r:'
                                     'inp=%r:out=%r',
                                     nodes[j].op_type, nodes[j].input,
                                     nodes[j].output, inp, out)
                        nodes[j] = _rename_node_output(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True  # pragma: no cover
                    if inp in nodes[j].input:
                        logger.debug('onnx_remove_node_identity:'
                                     '_rename_node_input:%s:%r->%r:'
                                     'inp=%r:out=%r',
                                     nodes[j].op_type, nodes[j].input,
                                     nodes[j].output, inp, out)
                        nodes[j] = _rename_node_input(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                logger.debug('onnx_remove_node_identity:2:remove:%s:%r->%r:',
                             nodes[i].op_type, nodes[i].input, nodes[i].output)
                nodes[i] = None
                rem += 1

    if recursive:
        # Handles subgraphs.
        for i in range(len(nodes)):  # pylint: disable=C0200
            node = nodes[i]
            if node is None or not (node.attribute):  # pylint: disable=C0325
                continue
            nodes[i] = _apply_remove_node_fct_node(
                onnx_remove_node_identity,
                node, recursive=True, debug_info=debug_info + [node.name])

    # Finally create the new graph.
    nodes = list(filter(lambda n: n is not None, nodes))
    if len(nodes) == 0:
        # something went wrong
        nodes = list(graph.node)
    if is_function:
        logger.debug("onnx_remove_node_identity:end function with %d nodes.",
                     len(nodes))
        return make_function(
            onnx_model.domain, onnx_model.name,
            onnx_model.input, onnx_model.output, nodes,
            opset_imports=onnx_model.opset_import,
            attributes=onnx_model.attribute,
            doc_string=onnx_model.doc_string)
    graph = make_graph(nodes, onnx_model.name,
                       onnx_model.input, onnx_model.output,
                       onnx_model.initializer)
    graph.value_info.extend(onnx_model.value_info)  # pylint: disable=E1101
    logger.debug("onnx_remove_node_identity: end graph with %d nodes.",
                 len(nodes))
    return graph
|
[
"noreply@github.com"
] |
sdpython.noreply@github.com
|
35a65c4ea014c1aed94e50ec5bada17dacf43f2b
|
3fc19cb92237b0823cafbfc66600c8129b1c2245
|
/linesOnMap.py
|
f745da58f3775021f35d3ff2eff56fe17d98b9a4
|
[] |
no_license
|
rayyan-khan/4-railroad-lab
|
f47600305447565d89056c42f06bf03d066204a0
|
c51160dddcb17cda7739742cf93875fb431b7170
|
refs/heads/master
| 2022-02-26T08:30:47.103538
| 2018-12-15T02:59:05
| 2018-12-15T02:59:05
| 161,864,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,356
|
py
|
from tkinter import *
# set US map to background
window = Tk()
window.title('Railroad Lab')
window.config(bg='white')

# Background map image; the window and canvas are sized to match it exactly.
photo = PhotoImage(file = 'map.gif')
w = photo.width()
h = photo.height()
window.geometry('{}x{}'.format(w, h))
print(w, h)

canvas = Canvas(window, width=w, height=h)
canvas.pack()
# Image is anchored at its centre, hence w/2, h/2.
canvas.create_image(w/2, h/2, image=photo)
# import files
rrNodes = open('rrNodes.txt', 'r')  # id, latitude, longitude
rrEdges = open('rrEdges.txt', 'r')  # id1, id2 (an edge exists between them)

dictNodes = {}  # dictNodes = {id: (latitude, longitude)}
# Each rrNodes line is "<id> <lat> <lon>"; the files stay open for the
# script's lifetime (never explicitly closed).
for id in rrNodes:
    id = id.strip().split(' ')
    dictNodes[id[0]] = (float(id[1]), float(id[2]))
def transformLatitude(latitude):
    """Shift the coordinate by +146, then scale by 6.5 (canvas pixels)."""
    return (latitude + 146) * 6.5
def transformLongitude(longitude):
    """Negate the coordinate, shift by +81, then scale by 9 (canvas pixels)."""
    return (81 - longitude) * 9
for pair in rrEdges:  # station1, station2, that are connected
    pair = pair.strip().split(' ')
    point1 = pair[0]  # first station id
    point2 = pair[1]  # second station id
    # NOTE(review): index [1] (longitude) feeds transformLatitude and [0]
    # (latitude) feeds transformLongitude — the variable names look swapped
    # relative to dictNodes' (lat, lon) tuples, though the resulting x/y
    # mapping is presumably what draws correctly; verify against the map.
    p1Latitude = transformLatitude(dictNodes[point1][1])
    p1Longitude = transformLongitude(dictNodes[point1][0])
    p2Latitude = transformLatitude(dictNodes[point2][1])
    p2Longitude = transformLongitude(dictNodes[point2][0])
    canvas.create_line(p1Latitude, p1Longitude, p2Latitude, p2Longitude, fill='red')

mainloop()
|
[
"rayyan.khan258@gmail.com"
] |
rayyan.khan258@gmail.com
|
de84e59db82f5181eb521a99d4b8e39400fa5147
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/bHTb8p5nybCrjFPze_15.py
|
21f824539d57198450f4e378f2ea821112ddbff4
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
"""
Write a function that, given the start `start_num` and end `end_num` values,
return a list containing all the numbers **inclusive** to that range. See
examples below.
### Examples
inclusive_list(1, 5) ➞ [1, 2, 3, 4, 5]
inclusive_list(2, 8) ➞ [2, 3, 4, 5, 6, 7, 8]
inclusive_list(10, 20) ➞ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
inclusive_list(17, 5) ➞ [17]
### Notes
* The numbers in the list are sorted in ascending order.
* If `start_num` is greater than `end_num`, return a list with the higher value. See example #4.
* A recursive version of this of challenge can be found [here](https://edabit.com/challenge/CoSFaDzSxrSjsZ8F6).
"""
def inclusive_list(start_num, end_num):
    """Return [start_num .. end_num] inclusive, in ascending order.

    When start_num >= end_num the result is the one-element list
    [start_num], matching the challenge's rule for reversed bounds.
    """
    if start_num >= end_num:
        return [start_num]
    # range() is half-open, so extend the stop bound by one.
    return list(range(start_num, end_num + 1))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
97fe0c0adb566ece112ebb0a0b205498a4739980
|
99fc570cc293c971a72fa88434bf9e39b2e45f19
|
/watermarking/migrations/0007_auto_20181019_1535.py
|
729597cfa04aff9b38bb283ce3328c1de2c71d6c
|
[] |
no_license
|
EroPerez/dhmdi
|
94d4e6812fa0b098af95462faa45c004cb503c7d
|
73dbeac369fc4bd8f59209c65189f0872d8980a1
|
refs/heads/master
| 2020-04-08T18:13:47.463678
| 2018-11-28T17:03:06
| 2018-11-28T17:03:06
| 159,599,005
| 2
| 0
| null | 2018-11-29T03:00:26
| 2018-11-29T03:00:26
| null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# Generated by Django 2.0 on 2018-10-19 19:35
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering ``watermarking.created_at``.

    NOTE(review): the ``default`` is the wall-clock timestamp captured when
    the migration was generated (2018-10-19), not "now" at insert time —
    typical of Django's auto-generated one-off defaults; left untouched
    because editing applied migrations breaks history consistency.
    """

    dependencies = [
        ('watermarking', '0006_auto_20181019_1451'),
    ]

    operations = [
        migrations.AlterField(
            model_name='watermarking',
            name='created_at',
            field=models.DateField(default=datetime.datetime(2018, 10, 19, 19, 35, 34, 33740, tzinfo=utc)),
        ),
    ]
|
[
"eadomenech@gmail.com"
] |
eadomenech@gmail.com
|
e1f0d3b16b4b79ebae8525dc1ebdc02cf7f8ca01
|
ac4b9385b7ad2063ea51237fbd8d1b74baffd016
|
/.history/google/drive_quickstart_20210213174716.py
|
bb8564f06bc3db10016ca2751aac0b644314d4b6
|
[] |
no_license
|
preethanpa/ssoemprep
|
76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f
|
ce37127845253c768d01aeae85e5d0d1ade64516
|
refs/heads/main
| 2023-03-09T00:15:55.130818
| 2021-02-20T06:54:58
| 2021-02-20T06:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,466
|
py
|
from __future__ import print_function
import pickle
import os.path
import io
from googleapiclient.discovery import MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE, build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
from oauth2client.service_account import ServiceAccountCredentials
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/documents.readonly', 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.activity', 'https://www.googleapis.com/auth/drive.metadata', 'https://www.googleapis.com/auth/drive']# 'https://www.googleapis.com/auth/documents.readonly']

# The ID of a sample document.
# DOCUMENT_ID = '1bQkFcQrWFHGlte8oTVtq_zyKGIgpFlWAS5_5fi8OzjY'
# Drive file to export; main() downloads this document as a PDF.
DOCUMENT_ID = '1sXQie19gQBRHODebxBZv4xUCJy-9rGpnlpM7_SUFor4'
def main():
    """Export DOCUMENT_ID from Google Drive as a PDF.

    Authenticates with a service-account JSON key, builds a Drive v3
    client, and streams the exported PDF into an in-memory buffer while
    printing download progress.
    """
    from google.oauth2 import service_account
    import googleapiclient.discovery

    SCOPES = ['https://www.googleapis.com/auth/documents', 'https://www.googleapis.com/auth/documents.readonly', 'https://www.googleapis.com/auth/documents.readonly', 'https://www.googleapis.com/auth/sqlservice.admin', 'https://www.googleapis.com/auth/drive.file']
    SERVICE_ACCOUNT_FILE = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/google/domain-wide-credentials-gdrive.json'
    # SERVICE_ACCOUNT_FILE = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/google/app-automation-service-account-thirdrayai-1612747564720-415d6ebd6001.json'

    credentials = service_account.Credentials.from_service_account_file(
        SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    #, subject='abhi@third-ray.com')

    drive_service = build('drive', 'v3', credentials=credentials)#.with_subject('abhi@third-ray.com'))
    # print(drive_service)

    # Fix: the original called `print(dir(docs_service.documents()))` here,
    # but `docs_service` was never defined (its build() call is commented
    # out above), raising NameError before the download could start.

    request = drive_service.files().export(fileId=DOCUMENT_ID, mimeType='application/pdf')
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        status, done = downloader.next_chunk()
        print(f"Download % {int(status.progress() * 100)}")
if __name__ == '__main__':
main()
|
[
"{abhi@third-ray.com}"
] |
{abhi@third-ray.com}
|
5c7ebc2db699834376e7efedc26192f0f6ca1708
|
fb46c7eb0e8108e59afff177b2d2ce00eb9a78cf
|
/pyomo/dae/tests/test_flatten.py
|
9ccf2cc8dfb7b705f241d3f32e6f04a48b7cd0c1
|
[
"BSD-3-Clause"
] |
permissive
|
qtothec/pyomo
|
823d6f683e29fc690564047ca5066daaf14d4f36
|
ab4ada5a93aed570a6e6ca6161462e970cffe677
|
refs/heads/new_dev
| 2022-06-09T15:42:16.349250
| 2020-05-18T00:37:24
| 2020-05-18T00:37:24
| 61,325,214
| 3
| 5
|
NOASSERTION
| 2019-12-09T04:03:56
| 2016-06-16T20:54:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,325
|
py
|
# ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import pyutilib.th as unittest
from pyomo.environ import ConcreteModel, Block, Var, Reference
from pyomo.dae import ContinuousSet
# This inport will have to change when we decide where this should go...
from pyomo.dae.flatten import flatten_dae_variables
class TestCategorize(unittest.TestCase):
    """Unit tests for flatten_dae_variables: partitioning a model's variables
    into non-time-indexed ("regular") and time-indexed groups."""
    def _hashRef(self, ref):
        # Order-independent fingerprint of a Reference: the sorted ids of the
        # variable data objects it contains. Lets us compare sets of
        # References without caring about iteration order.
        return tuple(sorted(id(_) for _ in ref.values()))
    def test_flat_model(self):
        # Variables declared directly on the model, with and without the
        # continuous (time) set in their index.
        m = ConcreteModel()
        m.T = ContinuousSet(bounds=(0,1))
        m.x = Var()
        m.y = Var([1,2])
        m.a = Var(m.T)
        m.b = Var(m.T, [1,2])
        m.c = Var([3,4], m.T)
        regular, time = flatten_dae_variables(m, m.T)
        regular_id = set(id(_) for _ in regular)
        # x, y[1], y[2] have no T in their index -> "regular".
        self.assertEqual(len(regular), 3)
        self.assertIn(id(m.x), regular_id)
        self.assertIn(id(m.y[1]), regular_id)
        self.assertIn(id(m.y[2]), regular_id)
        # Output for debugging
        #for v in time:
        #    v.pprint()
        #    for _ in v.values():
        #        print"  -> ", _.name
        # Each time-indexed variable should be returned as a Reference
        # sliced only along T.
        ref_data = {
            self._hashRef(Reference(m.a[:])),
            self._hashRef(Reference(m.b[:,1])),
            self._hashRef(Reference(m.b[:,2])),
            self._hashRef(Reference(m.c[3,:])),
            self._hashRef(Reference(m.c[4,:])),
        }
        self.assertEqual(len(time), len(ref_data))
        for ref in time:
            self.assertIn(self._hashRef(ref), ref_data)
    def test_1level_model(self):
        # Variables nested one Block deep, where the Block itself is
        # indexed by T.
        m = ConcreteModel()
        m.T = ContinuousSet(bounds=(0,1))
        @m.Block([1,2],m.T)
        def B(b, i, t):
            b.x = Var(list(range(2*i, 2*i+2)))
        regular, time = flatten_dae_variables(m, m.T)
        self.assertEqual(len(regular), 0)
        # Output for debugging
        #for v in time:
        #    v.pprint()
        #    for _ in v.values():
        #        print"  -> ", _.name
        ref_data = {
            self._hashRef(Reference(m.B[1,:].x[2])),
            self._hashRef(Reference(m.B[1,:].x[3])),
            self._hashRef(Reference(m.B[2,:].x[4])),
            self._hashRef(Reference(m.B[2,:].x[5])),
        }
        self.assertEqual(len(time), len(ref_data))
        for ref in time:
            self.assertIn(self._hashRef(ref), ref_data)
    def test_2level_model(self):
        # Two levels of Block nesting below the time-indexed Block; the
        # flattener must still slice only along T.
        m = ConcreteModel()
        m.T = ContinuousSet(bounds=(0,1))
        @m.Block([1,2],m.T)
        def B(b, i, t):
            @b.Block(list(range(2*i, 2*i+2)))
            def bb(bb, j):
                bb.y = Var([10,11])
            b.x = Var(list(range(2*i, 2*i+2)))
        regular, time = flatten_dae_variables(m, m.T)
        self.assertEqual(len(regular), 0)
        # Output for debugging
        #for v in time:
        #    v.pprint()
        #    for _ in v.values():
        #        print"  -> ", _.name
        ref_data = {
            self._hashRef(Reference(m.B[1,:].x[2])),
            self._hashRef(Reference(m.B[1,:].x[3])),
            self._hashRef(Reference(m.B[2,:].x[4])),
            self._hashRef(Reference(m.B[2,:].x[5])),
            self._hashRef(Reference(m.B[1,:].bb[2].y[10])),
            self._hashRef(Reference(m.B[1,:].bb[2].y[11])),
            self._hashRef(Reference(m.B[1,:].bb[3].y[10])),
            self._hashRef(Reference(m.B[1,:].bb[3].y[11])),
            self._hashRef(Reference(m.B[2,:].bb[4].y[10])),
            self._hashRef(Reference(m.B[2,:].bb[4].y[11])),
            self._hashRef(Reference(m.B[2,:].bb[5].y[10])),
            self._hashRef(Reference(m.B[2,:].bb[5].y[11])),
        }
        self.assertEqual(len(time), len(ref_data))
        for ref in time:
            self.assertIn(self._hashRef(ref), ref_data)
# TODO: Add tests for Sets with dimen==None
if __name__ == "__main__":
    unittest.main()
|
[
"jsiirola@users.noreply.github.com"
] |
jsiirola@users.noreply.github.com
|
524ffd950e6b5b0334c1fbefee5c7036da505cf8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2235/60614/303084.py
|
6cd63f840ba7468c2f277dfeb99b91a286eebce6
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
def other(num):
    """Return the partner of `num` within its consecutive even/odd pair
    (2k <-> 2k+1): num+1 for even input, num-1 for odd input."""
    return num + 1 if num % 2 == 0 else num - 1
# Read n pairs and m constraint edges, then try to 2-color paired vertices
# (each vertex i is paired with other(i)) so no edge is violated; prints the
# chosen vertices or "NIE" (Polish: "no") on failure. 2-SAT-like structure.
init = [int(x) for x in input().split()]
n = init[0]*2   # two vertices per pair
m = init[1]     # number of edges
pairs=[]        # NOTE(review): never used -- presumably leftover
edge=[[] for i in range(n)]
for i in range(m):
    temp=[int(x) for x in input().split()]
    # An edge (u, v) forces: choosing u implies choosing the partner of v,
    # and vice versa (implication-graph encoding).
    edge[temp[0]-1].append(other(temp[1]-1))
    edge[temp[1]-1].append(other(temp[0]-1))
ans=[-1]*n      # stack of vertices colored during the current dfs attempt
def dye():
    """Attempt to color all vertices; color 1 = chosen, 2 = excluded, 0 = unset.

    Returns [ok, color]: ok is False if both choices for some pair fail.
    """
    color = [0] * n
    for i in range(n):
        if (color[i]!=0):
            continue
        count=0
        diaoYong=dfs(i,color,count)   # diaoYong ~ "call result"
        color=diaoYong[1]
        count=diaoYong[2]
        if (not diaoYong[0]):
            # First choice failed: undo the colors set during that attempt
            # and retry with the partner vertex.
            # NOTE(review): dfs fills ans[1..count] (count is incremented
            # before ans[count]=x) but this loop reads ans[0..count-1] --
            # looks off by one; confirm against the intended algorithm.
            for j in range(count):
                color[ans[j]]=0
                color[other(ans[j])]=0
            count=0
            diaoYong = dfs(other(i), color, count)
            color = diaoYong[1]
            if (not diaoYong[0]):
                return [False,color]
    return [True,color]
def dfs(x,color,count):
    """Color x as chosen (1) and its partner as excluded (2), then follow
    implications; returns [ok, color, count]. Recursive -- may hit Python's
    recursion limit on large inputs (no sys.setrecursionlimit here)."""
    if (color[x]==1):
        return [True,color,count]
    if (color[x]==2):
        return [False,color,count]   # contradiction: x was excluded earlier
    color[x]=1
    color[other(x)]=2
    count+=1
    ans[count]=x
    for i in range(len(edge[x])):
        diGui=dfs(edge[x][i],color,count)   # diGui ~ "recursion"
        color=diGui[1]
        count=diGui[2]
        if (not diGui[0]):
            return [False,color,count]
    return [True,color,count]
result=dye()
if (result[0]):
    color=result[1]
    # Print 1-based indices of all chosen vertices.
    for i in range(n):
        if (color[i]==1):
            print(i+1)
else:
    print("NIE")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
caee1b2c16ec8d413abb4a20682ec50cc7eebdc8
|
88963a0e0ddf47e666a3acd030cfa3050693c2bd
|
/globMatching.py
|
372abb445e911784d3d376da14874bbf5fff51a9
|
[] |
no_license
|
santoshparmarindia/algoexpert
|
9728054bbebb4e9413fe3c7cb75463b959046191
|
d71397bef5bf3a76735b2e4ef8ee808919209b8c
|
refs/heads/main
| 2023-06-01T15:44:28.296437
| 2021-06-15T23:03:19
| 2021-06-15T23:03:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
def globMatching(fileName, pattern):
    # NOTE(review): this function looks unfinished -- it computes the wildcard
    # positions and a one-character slice but never returns anything, and
    # `stringBefore` is unused. Presumably it was meant to implement
    # glob-style matching of fileName against pattern; confirm the intended
    # semantics before relying on it.
    locations = getLocationOfSymbols(pattern)
    for pos in locations:
        if pattern[pos]=="*":
            stringBefore=fileName[len(fileName)-pos]
def getLocationOfSymbols(pattern):
    """Return the indices of every '*' and '?' wildcard in `pattern`,
    in left-to-right order."""
    positions = []
    for index, character in enumerate(pattern):
        if character == "*" or character == "?":
            positions.append(index)
    return positions
|
[
"caleberioluwa@gmail.com"
] |
caleberioluwa@gmail.com
|
a2f04deabb3eb3ccda22c389cf99ce25e63a77dc
|
57eb44ce1d84aca3580e28688cf645db483d0d03
|
/accounts/views/user.py
|
06e3bab546f4abebad577e240d55e3488e345733
|
[
"Apache-2.0"
] |
permissive
|
TheLabbingProject/pylabber
|
cbfd7a6663d56f779dde96bd6e0281f5c8f06393
|
4b51065f457ab86ed311f222080187caf1979fea
|
refs/heads/master
| 2023-04-08T05:29:08.356479
| 2023-03-29T09:06:11
| 2023-03-29T09:06:11
| 205,411,164
| 5
| 3
|
Apache-2.0
| 2023-03-29T09:06:13
| 2019-08-30T15:40:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
"""
Definition of the :class:`UserViewSet` class.
"""
from accounts.filters.user import UserFilter
from accounts.serializers.user import UserSerializer
from django.contrib.auth import get_user_model
from django.db.models import QuerySet
from pylabber.views.defaults import DefaultsMixin
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
User = get_user_model()
class UserViewSet(DefaultsMixin, viewsets.ModelViewSet):
    """
    API endpoint that allows :class:`~accounts.models.user.User` instances to
    be viewed or edited.
    """

    filter_class = UserFilter
    queryset = User.objects.all().order_by("date_joined")
    serializer_class = UserSerializer

    def filter_queryset(self, queryset) -> QuerySet:
        """
        Restrict the listed users according to the requester's permissions.

        Staff and superusers see everyone; any other user only sees members
        of their own laboratories.

        Parameters
        ----------
        queryset : QuerySet
            Base queryset

        Returns
        -------
        QuerySet
            User instances
        """
        requesting_user = self.request.user
        filtered = super().filter_queryset(queryset)
        if requesting_user.is_staff or requesting_user.is_superuser:
            return filtered
        return filtered.filter(
            laboratory__in=requesting_user.laboratory_set.all()
        )

    @action(detail=False, methods=["get"])
    def get_institutions(self, request):
        """Return the distinct non-null institutes of the queryset's users."""
        institutions = {
            institute
            for institute in self.get_queryset().values_list(
                "profile__institute", flat=True
            )
            if institute is not None
        }
        return Response({"results": institutions})
|
[
"z.baratz@gmail.com"
] |
z.baratz@gmail.com
|
8d31ea4c5eee17006526e8dbb68804018da6797d
|
c9dc1df17ecb9e279eb4403b83358363cdbe7fee
|
/project/cms/migrations/0020_auto_20180302_0513.py
|
55c47d006cd522af0420e248798d77367eeb50f8
|
[] |
no_license
|
m0nte-cr1st0/keyua
|
c3894a94c9bfe73409078be11cb1d3f64831054c
|
b964ebb7e260fbebdbc27e3a571fed6278196cac
|
refs/heads/master
| 2022-11-25T16:03:51.882386
| 2020-01-09T12:57:54
| 2020-01-09T12:57:54
| 232,809,529
| 0
| 0
| null | 2022-11-22T02:24:49
| 2020-01-09T12:58:10
|
Python
|
UTF-8
|
Python
| false
| false
| 656
|
py
|
# Generated by Django 2.0 on 2018-03-02 05:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema change: drop BlogComponent.title and add an
    # optional FK BlogComponent.category -> blog.Category (CASCADE delete).
    dependencies = [
        ('blog', '0006_auto_20180226_0554'),
        ('cms', '0019_page_show_on_category_menu'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='blogcomponent',
            name='title',
        ),
        migrations.AddField(
            model_name='blogcomponent',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Category'),
        ),
    ]
|
[
"dinamo.mutu111@gmail.com"
] |
dinamo.mutu111@gmail.com
|
c602d0dde6c12d0b6e4e1af055d8d9bba4a4991c
|
2310666ca3de8c5e41b1fb7268632015b0658dde
|
/leetcode/0013_roman_to_integer.py
|
6cdd3a3254113bc451c81ea965e8f537a2674e3a
|
[
"BSD-2-Clause"
] |
permissive
|
chaosWsF/Python-Practice
|
72196aa65a76dd27e1663e50502954dc07f4cad6
|
49a0b03c55d8a702785888d473ef96539265ce9c
|
refs/heads/master
| 2022-08-16T13:04:41.137001
| 2022-08-16T06:17:01
| 2022-08-16T06:17:01
| 144,381,839
| 1
| 0
| null | 2021-09-16T03:58:32
| 2018-08-11T12:02:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,037
|
py
|
"""
Roman numerals are represented by seven different symbols:
I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, two is written as II in Roman numeral, just two
one's added together. Twelve is written as, XII, which is
simply X + II. The number twenty seven is written as XXVII,
which is XX + V + II.
Roman numerals are usually written largest to smallest from
left to right. However, the numeral for four is not IIII.
Instead, the number four is written as IV. Because the one is
before the five we subtract it making four. The same principle
applies to the number nine, which is written as IX. There are
six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer. Input is
guaranteed to be within the range from 1 to 3999.
Example 1:
Input: "III"
Output: 3
Example 2:
Input: "IV"
Output: 4
Example 3:
Input: "IX"
Output: 9
Example 4:
Input: "LVIII"
Output: 58
Explanation: L = 50, V= 5, III = 3.
Example 5:
Input: "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
"""
class Solution:
    """Three equivalent Roman-numeral-to-integer converters (LeetCode 13)."""

    def romanToInt(self, s: str) -> int:
        """Greedy scan: try the two-character subtractive pair first."""
        table = {
            'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,
            'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900,
        }
        total = 0
        pos = 0
        while pos < len(s):
            pair = s[pos:pos + 2]
            if pair in table:
                total += table[pair]
                pos += 2
            else:
                total += table[s[pos]]
                pos += 1
        return total

    def romanToInt1(self, s):
        """Running sum: a symbol smaller than its successor counts negative."""
        value_of = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        total = value_of[s[0]]
        previous = value_of[s[0]]
        for ch in s[1:]:
            current = value_of[ch]
            total += current
            if current > previous:
                # The previous symbol was already added positively; making it
                # subtractive means removing it twice.
                total -= 2 * previous
            previous = current
        return total

    def romanToInt2(self, s):
        """Two-character lookup using a membership test (step = 1 or 2)."""
        mapping = {
            'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,
            'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900,
        }
        length = len(s)
        if length < 2:
            return mapping[s]
        pos = 0
        result = 0
        while pos < length:
            pair = s[pos:pos + 2]
            if pair in mapping:
                result += mapping[pair]
                pos += 2
            else:
                result += mapping[s[pos]]
                pos += 1
        return result
|
[
"25746440+chaosWsF@users.noreply.github.com"
] |
25746440+chaosWsF@users.noreply.github.com
|
9ae6fece0afe3beedd05a6cb5236776f8a8a70b0
|
e173bfaa3c728a3ce7a7709f1beee5c5a64a0e2a
|
/LandMatrixExtractor/es/weso/landmatrix/entities/deal_analyser_entry.py
|
e5fc06b9c89ff4a8c576f6f72ed6ec143094e308
|
[
"Unlicense"
] |
permissive
|
weso/landportal-importers
|
d4ddfe0db298a9c2f51820f714b21ff5c570291d
|
6edfa3c301422bbe8c09cb877b1cbddbcd902463
|
refs/heads/master
| 2020-03-30T08:08:31.457937
| 2014-07-15T11:31:28
| 2014-07-15T11:31:28
| 15,933,682
| 0
| 0
| null | 2014-04-07T09:15:12
| 2014-01-15T11:47:44
| null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
__author__ = 'Dani'
class DealAnalyserEntry(object):
    """A single DealAnalyser result record.

    The DealAnalyser returns a dict keyed by country and indicator; each
    entry bundles an indicator, an integer date, a country entity and the
    observed value.
    """

    def __init__(self, indicator, date, country, value):
        self.value = value
        self.country = country
        self.date = date
        self.indicator = indicator
|
[
"danifdezalvarez@gmail.com"
] |
danifdezalvarez@gmail.com
|
733747b606dec6f402a5ea55269c848673bb82c8
|
4e3ce5a1fc39c00d28b42f61c15cc54f65e706f0
|
/leetcode/101-200/T164_maximumGap.py
|
5d559312515ac2a5c70ae408e8cd69df9eaf6dd8
|
[] |
no_license
|
PemLer/Journey_of_Algorithm
|
6c2a1d1f2bb9e1cff5239857dd33747b14127dfd
|
c0a51f0054446d54f476cf8f1cd3e6268dcd2b35
|
refs/heads/master
| 2023-03-31T01:57:09.223491
| 2020-06-23T11:25:27
| 2020-06-23T11:25:27
| 178,988,861
| 2
| 0
| null | 2020-06-23T11:25:29
| 2019-04-02T03:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
class Solution(object):
    def maximumGap(self, num):
        """Return the maximum difference between successive elements of the
        sorted form of `num`, in linear time via pigeonhole bucketing.

        With fewer than two elements (or all elements equal) the gap is 0.
        """
        if len(num) < 2:
            return 0
        lo, hi = min(num), max(num)
        if lo == hi:
            return 0
        # Bucket width chosen so the answer can never lie inside one bucket.
        width = (hi - lo) // (len(num) - 1) or 1
        bucket_count = (hi - lo) // width + 1
        mins = [None] * bucket_count
        maxs = [None] * bucket_count
        for value in num:
            idx = (value - lo) // width
            mins[idx] = value if mins[idx] is None else min(mins[idx], value)
            maxs[idx] = value if maxs[idx] is None else max(maxs[idx], value)
        # The answer is the largest jump from one non-empty bucket's max to
        # the next non-empty bucket's min.
        best = 0
        previous_max = None
        for idx in range(bucket_count):
            if mins[idx] is None:
                continue
            if previous_max is not None:
                best = max(best, mins[idx] - previous_max)
            previous_max = maxs[idx]
        return best
|
[
"1130527871@qq.com"
] |
1130527871@qq.com
|
8efd18901c9f572c09ee983bae120fa0e3bed0ec
|
fe427adf26411595c447ce880436383bb7c565e2
|
/exps/inverted_pendulum/run.py
|
311d66ab5798e744c469cbb8f03f9520be5aaf10
|
[
"MIT"
] |
permissive
|
LWANG0413/hucrl
|
81023314bc2d32abe9a3bdda5e97c4f3d88cd1db
|
f09076bb7083aeef62bdaa129f485bab9a5fa685
|
refs/heads/master
| 2023-04-14T18:57:51.237180
| 2021-03-26T16:13:28
| 2021-03-26T16:13:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
"""Run the inverted-pendulum using MB-MPO."""
from dotmap import DotMap
from exps.inverted_pendulum import (
ACTION_COST,
ENVIRONMENT_MAX_STEPS,
TRAIN_EPISODES,
get_agent_and_environment,
)
from exps.inverted_pendulum.plotters import (
plot_pendulum_trajectories,
set_figure_params,
)
from exps.inverted_pendulum.util import get_mbmpo_parser
from exps.util import train_and_evaluate
PLAN_HORIZON, SIM_TRAJECTORIES = 8, 16
parser = get_mbmpo_parser()
parser.description = "Run Swing-up Inverted Pendulum using Model-Based MPO."
parser.set_defaults(
action_cost=ACTION_COST,
train_episodes=TRAIN_EPISODES,
environment_max_steps=ENVIRONMENT_MAX_STEPS,
plan_horizon=PLAN_HORIZON,
sim_num_steps=ENVIRONMENT_MAX_STEPS,
sim_initial_states_num_trajectories=SIM_TRAJECTORIES // 2,
sim_initial_dist_num_trajectories=SIM_TRAJECTORIES // 2,
model_kind="ProbabilisticEnsemble",
model_learn_num_iter=50,
model_opt_lr=1e-3,
seed=1,
)
args = parser.parse_args()
params = DotMap(vars(args))
environment, agent = get_agent_and_environment(params, "mbmpo")
set_figure_params(serif=True, fontsize=9)
train_and_evaluate(
agent, environment, params, plot_callbacks=[plot_pendulum_trajectories]
)
|
[
"sebastian.curi@inf.ethz.ch"
] |
sebastian.curi@inf.ethz.ch
|
0b2e693e5580b814554653d9ffdd9871c366fd33
|
82b728e805d887102c0b8c415731b353877690cd
|
/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py
|
fdf813f6f78ca224219d688d99fd770c3fbc9267
|
[
"Apache-2.0"
] |
permissive
|
geraint0923/python-aiplatform
|
90c7742c9bdbde05b9688b117e8e59c0406d6f85
|
7ab05d5e127636d96365b7ea408974ccd6c2f0fe
|
refs/heads/main
| 2023-08-24T05:30:38.519239
| 2021-10-27T20:38:25
| 2021-10-27T20:38:25
| 370,803,114
| 0
| 0
|
Apache-2.0
| 2021-05-25T19:15:47
| 2021-05-25T19:15:46
| null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchReadFeatureValues
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_sync]
from google.cloud import aiplatform_v1
def sample_batch_read_feature_values():
    """Snippet for batch_read_feature_values"""
    # Create a client
    client = aiplatform_v1.FeaturestoreServiceClient()
    # Initialize request argument(s)
    # Read instances come from a CSV file in Cloud Storage.
    csv_read_instances = aiplatform_v1.CsvSource()
    csv_read_instances.gcs_source.uris = ['uris_value']
    # Feature values are written to a BigQuery table.
    destination = aiplatform_v1.FeatureValueDestination()
    destination.bigquery_destination.output_uri = "output_uri_value"
    # Which entity type and which of its features to read.
    entity_type_specs = aiplatform_v1.EntityTypeSpec()
    entity_type_specs.entity_type_id = "entity_type_id_value"
    entity_type_specs.feature_selector.id_matcher.ids = ['ids_value']
    request = aiplatform_v1.BatchReadFeatureValuesRequest(
        csv_read_instances=csv_read_instances,
        featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}",
        destination=destination,
        entity_type_specs=entity_type_specs,
    )
    # Make the request (long-running operation; block until done).
    operation = client.batch_read_feature_values(request=request)
    print("Waiting for operation to complete...")
    response = operation.result()
    print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_sync]
|
[
"noreply@github.com"
] |
geraint0923.noreply@github.com
|
67e01004501971d37fce799ec2e4c98ba5c2c2cd
|
efe1546fa1f057cbbbe974bd8478309b6176d641
|
/waf/tests/preproc/wscript
|
8b6a5c1d28eb03a8d92149cdc9dbd61bd15a6fee
|
[
"Apache-2.0"
] |
permissive
|
yankee14/reflow-oven-atmega328p
|
2df323aba16ac4f3eac446abc633a5d79a1a55cb
|
e6792143576f13f0a3a49edfd54dbb2ef851d95a
|
refs/heads/master
| 2022-12-02T21:32:39.513878
| 2019-05-30T06:25:12
| 2019-05-30T06:25:12
| 188,760,664
| 0
| 1
|
Apache-2.0
| 2022-11-15T18:22:50
| 2019-05-27T02:52:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,441
|
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2012 (ita)
VERSION='0.0.1'
APPNAME='preproc_test'
top = '.'
out = 'build'
from waflib import Utils
from waflib.Tools import c_preproc
from waflib.Tools.c_preproc import NUM, OP, IDENT
from waflib.Logs import pprint
def configure(conf):
    # Nothing to configure: the build step only exercises the pure-Python
    # preprocessor in waflib.Tools.c_preproc.
    pass
def build(bld):
    """Self-test suite for waf's C preprocessor (c_preproc): macro expansion,
    token pasting, include extraction, expression evaluation and recursive
    header scanning. Failures are collected and reported at the end."""
    bld.failure = 0
    def disp(color, result):
        # Print a test result; RED marks the whole build as failed.
        pprint(color, result)
        if color == 'RED':
            bld.failure=1
    def stop_status(bld):
        if bld.failure:
            bld.fatal('One or several test failed, check the outputs above')
    bld.add_post_fun(stop_status)
    # Macro definitions used by the expansion tests below (object-like and
    # function-like macros, including stringize '#' and paste '##').
    defs = {
        'm1' : "m1 9 + 9",
        'fun0' : "fun0(x, y) x y",
        'fun1' : "fun1(x, y) x ## y",
        'fun2' : "fun2(x) #x",
        'fun3' : "fun3(x, y) x * y",
        'fun4' : "fun4(x) fun2(x)",
        'fun5' : "fun5(x, y, z) x ## y ## z",
        'fun6' : "fun6(x, y) <x.y>",
        'fun7' : "fun() 7",
    }
    # Test 1: macro expansion -- tokenize, expand against defs, re-stringize.
    def test(x, result, fun=c_preproc.reduce_tokens):
        toks = c_preproc.tokenize(x)
        c_preproc.reduce_tokens(toks, defs, [])
        ret = c_preproc.stringize(toks)
        if ret == result:
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "%s\t\t%r" % (ret, toks))
    test("1 + m1 + 1", "1+9+9+1")
    test("1 + fun0(1, +) 1", "1+1+1")
    test("fun2(mmm)", "mmm")
    test("m1", "9+9")
    test("fun2(m1)", "m1")
    test("fun4(m1)", "9+9")
    test("fun1(m, m)", "mm")
    test("fun5(a, b, c)", "abc")
    test("fun1(>, =)", ">=")
    test("fun1(a, 12)", "a12")
    test("fun5(a, _, 12)", "a_12")
    test("fun6(math, h)", "<math.h>")
    # Test 2: extracting the include path from an expanded #include line.
    def test(x, result):
        ret = c_preproc.extract_include(x, defs)
        if ret == result:
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "%s" % str(ret))
    test("fun6(math, h)", ("<", "math.h"))
    # Test 3: evaluating preprocessor expressions (as in #if).
    def test(x, result):
        toks = c_preproc.tokenize(x)
        c_preproc.reduce_tokens(toks, defs, [])
        (_, ret) = c_preproc.reduce_eval(toks)
        if int(ret) == result:
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "%s\t\t%r" % (ret, toks))
    test("1+1", 2)
    test("1-1", 0)
    test("1?77:0", 77)
    test("0?0:88", 88)
    test("1+2*3", 7)
    test("1*2+3", 5)
    test("7*m1*3", 90)
    test("m1*3", 36)
    test("defined m1", 1)
    test("defined(m1)", 1)
    test("defined inex", 0)
    test("defined(inex)", 0)
    test("fun7()", 7)
    test("0&&2<3", 0)
    test("(5>1)*6", 6)
    test("1,2,3*9,9", 9)
    test("0x52 > 02", 1)
    # lazy evaluation
    test("defined(foo) && foo > 2", 0)
    test("defined(m1) && m1 > 20", 0)
    test("defined(m1) || m1 > 20", 1)
    # undefined macros -> 0
    test("not_possibly_defined || another", 0)
    test("1+2+((3+4)+5)+6==(6*7)/2==1*-1*-1", 1)
    # Test 4: full header scanning -- parse src/main.c with different A/B/C
    # defines and check that exactly the expected header is picked up.
    def add_defs(a, b, c, expected):
        main = bld.path.find_resource('src/main.c')
        bld.env.DEFINES = ['A=%s' % str(a), 'B=%s' % str(b), 'C=%s' % str(c)]
        gruik = c_preproc.c_parser([main.parent])
        gruik.start(main, bld.env)
        if len(gruik.nodes) == 1 and gruik.nodes[0].name == expected:
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "%r %r %r -> header %s (got %r)" % (a, b, c, expected, gruik.nodes))
    add_defs(1, 1, 1, 'a.h')
    add_defs(1, 1, 0, 'b.h')
    add_defs(1, 0, 1, 'c.h')
    add_defs(1, 0, 0, 'd.h')
    add_defs(0, 1, 1, 'e.h')
    add_defs(0, 1, 0, 'f.h')
    add_defs(0, 0, 1, 'g.h')
    add_defs(0, 0, 0, 'h.h')
    # New macro table for the boolean-chain evaluation tests.
    defs = {
        'a' : 'a 0',
        'b' : 'b 1',
        'c' : 'c 1',
        'd' : 'd 0',
        'e' : 'e a || b || c || d'
    }
    # Test 5: '##' token pasting in an include path resolves to a.h.
    def test_pasting():
        main = bld.path.find_resource('src/pasting.c')
        bld.env.DEFINES = ['PREFIX_VAL=', 'SUFFIX_VAL=']
        gruik = c_preproc.c_parser([main.parent])
        gruik.start(main, bld.env)
        if len(gruik.nodes) == 1 and gruik.nodes[0].name == 'a.h':
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "token pasting -> %r (expected a.h)" % gruik.nodes)
    test_pasting()
    # Test 6: boolean chains through macro indirection (macro 'e').
    def test(x, result):
        toks = c_preproc.tokenize(x)
        c_preproc.reduce_tokens(toks, defs, [])
        (_, ret) = c_preproc.reduce_eval(toks)
        if int(ret) == result:
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "%s\t\t%r" % (ret, toks))
    test('a||b||c||d', 1)
    test('a&&b&&c&&d', 0)
    test('e', 1)
    # Test 7: recursive include scanning of recursion/a.c; `expected` is the
    # concatenated first letters of the headers found.
    def test_rec(defines, expected):
        main = bld.path.find_resource('recursion/a.c')
        bld.env.DEFINES = defines.split()
        gruik = c_preproc.c_parser([main.parent])
        gruik.start(main, bld.env)
        result = "".join([x.name[0] for x in gruik.nodes])
        if result == expected:
            color = "GREEN"
        else:
            color = "RED"
        disp(color, "%s\t\t%r" % (expected, gruik.nodes))
    test_rec("", "a")
    test_rec("FOO=1", "ac")
    test_rec("BAR=1", "abc")
    test_rec("FOO=1 BAR=1", "ac")
    return
    # Deliberately unreachable (after `return`): kept as a reminder of a
    # known-invalid expression case.
    test("1?1,(0?5:9):3,4", 0) # <- invalid expression
|
[
"yankee14.ed@gmail.com"
] |
yankee14.ed@gmail.com
|
|
05a269f38e872abaf687b447a962c6e69d2948d4
|
68cd659b44f57adf266dd37789bd1da31f61670d
|
/swea/뱀.py
|
020110d76f3e7ac5fc7f1bcce168de785b3bfe14
|
[] |
no_license
|
01090841589/solved_problem
|
c0c6f5a46e4d48860dccb3b0288aa5b56868fbca
|
bbea2f31e5fe36cad100bc514eacd83545fb25b1
|
refs/heads/master
| 2023-07-02T23:55:51.631478
| 2021-08-04T13:57:00
| 2021-08-04T13:57:00
| 197,157,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
# Snake simulation (Baekjoon-style "뱀"): move a snake on an N x N board,
# growing on apples and turning per a schedule, until it hits a wall or
# itself; prints the elapsed time at the moment of collision.
import sys
sys.stdin = open('뱀.txt')  # redirect stdin to the local test-input file
DIR = [[0, 1], [1, 0], [0, -1], [-1, 0]]  # right, down, left, up (clockwise)
N = int(input())   # board size
a = int(input())   # number of apples
apple = [list(map(int, input().split())) for _ in range(a)]
s = int(input())   # number of scheduled turns
snake = [list(map(str, input().split())) for _ in range(s)]
snake.append([-1])  # sentinel so snake[0] is always valid after the last turn
MAP = [[0]*N for _ in range(N)]
for i in apple:
    MAP[i[0]-1][i[1]-1] = 9  # 9 marks an apple
MAP[0][0] = 1                # 1 marks a snake-occupied cell
stack = [[0, 0, 0, 0]]       # [row, col, elapsed time, direction index]
long = [[0, 0]]              # body cells, tail first
while True:
    [y, x, t, c] = stack.pop(0)
    t += 1
    # Advance the head one cell in the current direction.
    y += DIR[c][0]
    x += DIR[c][1]
    # Apply a scheduled turn once its time is reached (L = left, else right).
    if t == int(snake[0][0]):
        if snake[0][1] == 'L':
            c = (c + 3) % 4
        else:
            c = (c + 1) % 4
        snake.pop(0)
    if 0 <= y < N and 0 <= x < N and MAP[y][x] != 1:
        if MAP[y][x] == 9:
            # Apple: grow -- keep the tail and occupy the new head cell.
            MAP[y][x] = 1
            long.append([y, x])
        else:
            # Empty cell: move -- free the tail, occupy the new head cell.
            Y, X = long.pop(0)
            MAP[Y][X] = 0
            MAP[y][x] = 1
            long.append([y, x])
        stack.append([y, x, t, c])
    else:
        # Hit a wall or the snake's own body: game over.
        break
print(t)
|
[
"chanchanhwan@naver.com"
] |
chanchanhwan@naver.com
|
9c359467fb4335e915ad157c12c5bd309f190f2b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/231/28019/submittedfiles/swamee.py
|
eb9d48ac7aaed3c447136497d9b7cecec1da9701
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# -*- coding: utf-8 -*-
# Computes pipe diameter, Reynolds number and the Swamee-Jain friction
# factor from user-supplied hydraulic parameters.
import math
f = float(input('digite f:'))            # friction factor (initial guess)
l = float(input('digite l:'))            # pipe length
q = float(input('digite q:'))            # flow rate
deltah = float(input('digite deltah:'))  # head loss
v = float(input('digite v:'))            # kinematic viscosity
g = 9.81      # gravitational acceleration (m/s^2)
e = 0.000002  # absolute pipe roughness (m)
# Diameter from the Darcy-Weisbach head-loss relation.
d = ((8 * f * l * q**2) / (math.pi**2 * g * deltah))**0.5
# Reynolds number for flow in a circular pipe.
rey = (4 * q) / (math.pi * d * v)
# BUG FIX: Swamee-Jain friction factor. The original computed
# log10((e/3.7*d) * (5.74/rey**0.9)), i.e. it multiplied the two terms and
# put d in the numerator. The Swamee-Jain equation is
#   k = 0.25 / (log10(e/(3.7*d) + 5.74/rey**0.9))**2
k = 0.25 / (math.log10(e / (3.7 * d) + 5.74 / rey**0.9))**2
print('%.4f' % d)
print('%.4f' % rey)
print('%.4f' % k)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a79fb5c1a81166b1a3b1ce3af402698df246bace
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03138/s242505224.py
|
5340460e00db7847047a31b2e818dc0725314dd4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# Greedy over bits from high to low: for each bit position t, either keep the
# numbers' own bits (earning t per set bit) or, if flipping helps and the
# budget k allows, spend t of the budget to flip (earning t per unset bit).
n,k = map(int,input().split())
a = list(map(int,input().split()))
t,ans =2**40,0
while t:
    c = sum([(a[i]&t)//t for i in range(n)]) # number of 1s at this bit position across a[i]
    if c>=n-c or k<t: ans += t*c             # keep: majority are 1s, or can't afford the flip
    else: ans += t*(n-c);k-=t                # flip: pay t, earn t per zero bit
    t = t>>1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2623877e3f99911ed39c4d01307f8420d2a1b535
|
fccc9acd62447941a49313c01fcf324cd07e832a
|
/Função/testefuncao.py
|
aff1ed7ba584e2b052a263e21cf54c5626d1febc
|
[] |
no_license
|
paulovictor1997/Python
|
fba884ea19ed996c6f884f3fcd3d49c5a34cfd3d
|
671d381673796919a19582bed9d0ee70ec5a8bea
|
refs/heads/master
| 2023-04-29T18:01:55.451258
| 2021-05-19T15:12:09
| 2021-05-19T15:12:09
| 354,154,332
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
#def soma(a =0,b =0,c = 0):
#s = a + b + c
#return s
#s1 = soma(1,2,1)
#s2 = soma(3,1,1)
#print(f'A soma vale : {s1} e {s2}')
#def contador(*num):
#tam = len(num)
#print(f'Valores : {num}. E tem {tam} número(s) em cada')
#contador(1 , 3, 5, 7)
#contador(1 , 5 , 7)
#def dobra(lista):
#pos = 0
#while pos < len(lista):
#lista[pos] *= 2
#pos += 1
#valores = [2,5,4,3]
#dobra(valores)
#print(valores)
#from random import shuffle
#def sorteio(lista):
# lista = [grupo1, grupo2, grupo3]
# shuffle(lista)
# print(f'A ordem da apresetação é : {lista}')
#programa principal
#lista = []
#print('Sorteio')
#grupo1 = str(input('1º Grupo : '))
#grupo2 = str(input('2º Grupo : '))
#grupo3 = str(input('3º Grupo : '))
#sorteio(lista)
#def função():
#n1 = 4
#print(f'n1 dentro vale {n1}')
#n1 = 2
#função()
#print(f'n1 fora vale {n1}')
#def teste():
#x = 8
#print(f'Na função teste X ele vale {x}')
#PRINCIPAL
#n = 2
#print(f'Na variável N ele vale {n}')
#teste()
def par(n=0):
    """Return True when n is even, False when it is odd."""
    return n % 2 == 0
# Read a number and report its parity (prompts/messages are in Portuguese:
# "Digite um número" = "enter a number"; "par"/"ímpar" = "even"/"odd").
num = int(input('Digite um número : '))
if par(num):
    print('Número par !')
else:
    print('Número ímpar !')
|
[
"paulovictornunes97@gmail.com"
] |
paulovictornunes97@gmail.com
|
44d5c97995747aa07393a15f51ed50bc5bae7f0a
|
eccbb87eefe632a1aa4eafb1e5581420ccf2224a
|
/network/nn.py
|
fe56be268b4051085e83df4e53349562c717d4fe
|
[] |
no_license
|
jianjunyue/python-learn-ml
|
4191fc675d79830308fd06a62f16a23295a48d32
|
195df28b0b8b8b7dc78c57dd1a6a4505e48e499f
|
refs/heads/master
| 2018-11-09T15:31:50.360084
| 2018-08-25T07:47:20
| 2018-08-25T07:47:20
| 102,184,768
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
import numpy as np
def nonlin(x, deriv=False):
    """Sigmoid activation.

    With deriv == True, `x` is assumed to already be a sigmoid output and
    the derivative x * (1 - x) is returned instead of the activation.
    """
    if deriv != True:
        return 1 / (1 + np.exp(-x))
    return x * (1 - x)
# Train a tiny 3-4-1 fully-connected network on the XOR-like dataset below
# using plain batch gradient descent (learning rate implicitly 1).
X = np.array([[0,0,1],
            [0,1,1],
            [1,0,1],
            [1,1,1]])
print(X.shape)
y = np.array([[0],
            [1],
            [1],
            [0]])
print(y.shape)
np.random.seed(1)  # deterministic initialization for reproducibility
# randomly initialize our weights with mean 0
w0 = 2*np.random.random((3,4)) - 1
w1 = 2*np.random.random((4,1)) - 1
print(w0)
print(w1)
print(w0.shape)
print(w1.shape)
for j in range(60000):
    # Forward pass: input -> hidden (l1) -> output (l2).
    l0 = X
    l1 = nonlin(np.dot(l0,w0))
    l2 = nonlin(np.dot(l1,w1))
    l2_error = y - l2
    if (j% 10000) == 0:
        print("Error:" + str(np.mean(np.abs(l2_error))) )
    # Backward pass: error deltas scaled by the sigmoid derivative.
    l2_delta = l2_error*nonlin(l2,deriv=True)
    l1_error = l2_delta.dot(w1.T)
    l1_delta = l1_error * nonlin(l1,deriv=True)
    # Gradient-ascent-style weight update (error already signed as y - l2).
    w1 += l1.T.dot(l2_delta)
    w0 += l0.T.dot(l1_delta)
|
[
"409494312@qq.com"
] |
409494312@qq.com
|
494edcb6a321c1c2a75f534f196e8e3761d09887
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/web-dev-notes-resource-site/2-content/Python/automate-the-boring/Automate_the_Boring_Stuff_onlinematerials_(1)/automate_online-materials/mapIt.py
|
962b6c390846e4270f16c8a7480e072327a7dc3f
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
#! python3
# mapIt.py - Launches a map in the browser using an address from the
# command line or clipboard.
import webbrowser, sys, pyperclip
if len(sys.argv) > 1:
    # Get address from command line.
    address = " ".join(sys.argv[1:])
else:
    # Get address from clipboard.
    address = pyperclip.paste()
# Open Google Maps on the given address in the default browser.
webbrowser.open("https://www.google.com/maps/place/" + address)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
26457ac8cdae452935fadc9e69e29f8750810bd3
|
1d61057dab56fddd1edba78ce10b3a5f96c18c60
|
/diskinventory/build_database.py
|
d6819fa75fa4a82dc2e130fe3e97ee0927cb2b0b
|
[] |
no_license
|
phaustin/pythonlibs
|
f4f1f450d3e9bb8ebac5ffdb834d3750d80ee38e
|
35ed2675a734c9c63da15faf5386dc46c52c87c6
|
refs/heads/master
| 2022-04-29T00:45:34.428514
| 2022-03-06T17:16:30
| 2022-03-06T17:16:30
| 96,244,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,456
|
py
|
#!/home/phil/usr243_Fc3/bin/python
#ls -R -l -Q --time-style=full-iso --time=status /home/phil/* > listing.txt
#-rw-r--r-- 1 phil users 0 2005-10-06 12:28:09.000000000 -0700 "/home/phil/eosc211_fall2005.txt~"
#du /home/phil/* > ~/philprojects/disk_inventory/dulist.txt
import MySQLdb, MySQLdb.cursors
import glob, os.path
diskInfoDir='/home/phil/philprojects/diskInfoInventory-trunk'
# Jordan's diskInfoDir
#diskInfoDir='/home/jdawe/diskinfo/'
def create_ls(filesDB):
    """
    Sets up the ls table in the given database object.

    One row per file, matching the fields of the `ls -R -l` export
    described in the header comment at the top of this file.
    """
    cursor = filesDB.cursor()
    # NOTE(review): `partition` is a reserved word in newer MySQL versions —
    # confirm this still creates cleanly on the target server.
    thecols = """server varchar(255) DEFAULT ' ' NOT NULL,
              partition varchar(255) DEFAULT ' ' NOT NULL,
              permission char(10) DEFAULT ' ' NOT NULL,
              links varchar(255) DEFAULT ' ' NOT NULL,
              owner varchar(255) DEFAULT ' ' NOT NULL,
              theGroup varchar(255) DEFAULT ' ' NOT NULL,
              size int DEFAULT '-999' NOT NULL,
              date date DEFAULT ' ' NOT NULL,
              time time  DEFAULT ' ' NOT NULL,
              directory varchar(255) DEFAULT ' ' NOT NULL,
              name varchar(255) DEFAULT ' ' NOT NULL"""
    command = """CREATE TABLE ls (%s)""" % thecols
    cursor.execute(command)
    cursor.close()
##############
def create_lsowner(filesDB, owner):
    """
    Sets up an lsowner table in the given database object.

    Drops and recreates table ls<owner> containing that owner's rows from ls.
    """
    cursor = filesDB.cursor()
    cursor.execute("drop table IF EXISTS ls%s;" % owner)
    # NOTE(review): owner is interpolated directly into SQL. Values come from
    # the ls table's owner column, but this is still injection-prone — confirm
    # owners are sanitized upstream.
    command = """create table ls%s
    select server, partition, size, date, time, directory, name
    from ls
    where owner = "%s";""" % (owner, owner)
    cursor.execute(command)
    cursor.close()
##############
def create_du(filesDB):
    """
    Sets up the du table in the given database object.

    One row per directory from a `du` export: server, partition,
    size, directory name, and depth level.
    """
    cursor = filesDB.cursor()
    thecols = """server varchar(255) DEFAULT ' ' NOT NULL,
              partition varchar(255) DEFAULT ' ' NOT NULL,
              size int unsigned DEFAULT ' ' NOT NULL,
              name varchar(255) DEFAULT ' ' NOT NULL,
              level tinyint unsigned DEFAULT ' ' NOT NULL"""
    command = "CREATE TABLE du (%s)" % thecols
    cursor.execute(command)
    cursor.close()
#################
if __name__ == '__main__':
filesDB = MySQLdb.Connect(db = 'filecensus',
user = 'phil',
passwd = 'jeanluc')
cursor = filesDB.cursor()
cursor.execute("drop table IF EXISTS ls;")
cursor.execute("drop table IF EXISTS du;")
create_ls(filesDB)
create_du(filesDB)
ls_glob = glob.glob('%s/*_ls.csv' % diskInfoDir)
for filename in ls_glob:
print filename
cursor.execute("load data local infile '%s' into table ls fields terminated by ';';" % (filename))
# Get list of owners
cursor.execute("select owner from ls group by owner;")
owners = cursor.fetchall()
#for each owner, make a sub-database
for owner in owners:
# Testing code to not fill up /home partition.
if owner[0] == 'phil':
create_lsowner(filesDB, owner[0])
du_glob = glob.glob('%s/*_ls.csv' % diskInfoDir)
for filename in du_glob:
print filename
cursor.execute("load data local infile '%s' into table du fields terminated by ';';" % (filename))
cursor.close()
filesDB.close()
|
[
"paustin@eos.ubc.ca"
] |
paustin@eos.ubc.ca
|
7b9f607962a9ad7be9ffd32f81187cb4fffda709
|
d2d3ebfbf293fa96c95a29648f70774d310494d3
|
/app/storage/__init__.py
|
6e6201c1147b2dcca6c6f1008edee57cf2a9a6e3
|
[] |
no_license
|
sysdeep/DNote
|
092b01c38f7b1a266a59bddb327cee2d52ae01d2
|
6d933e3f487c5a03a4984a2bbcbc48ac38d4dc3c
|
refs/heads/master
| 2021-01-13T13:46:34.365021
| 2018-07-11T13:46:13
| 2018-07-11T13:46:13
| 76,347,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2018.08.11 - новая реализация хранилища - singleton
"""
import os.path
import shutil
# from app.rc import DIR_PROJECT
from .Manager import Manager
from .Storage import Storage
from .nodes.Nodes import NODE_TYPES
from app.rc import DIR_DEFAULTS
storage = Storage()  # module-level singleton; closed/reopened by create_storage()
#
#
# from .loader import get_storage, load_default_project, open_storage
#
#
#
# smanager = Manager()
# # smanager.open_storage(DIR_PROJECT)
#
#
# # set_up()
def create_storage(name, path):
    """
    Create a new storage at path/name from the bundled default template
    and switch the module-level `storage` singleton over to it.
    (Original docstring was in Russian: "Create a new storage".)
    """
    # log.info("SManager - create new store: {} in {}".format(name, path))
    # self.storage_path = path
    # sevents.storage_created()
    new_storage_path = os.path.join(path, name)
    # Seed the new storage by copying the default template directory "sdir1".
    DIR_DEFAULT_STORAGE_1 = os.path.join(DIR_DEFAULTS, "sdir1")
    shutil.copytree(DIR_DEFAULT_STORAGE_1, new_storage_path)
    # self.open_storage(new_storage_path)
    storage.close_storage()
    storage.open_storage(new_storage_path)
|
[
"sysdeep@yandex.ru"
] |
sysdeep@yandex.ru
|
b7878405a799077723e9ef10a02c1c9f638fa925
|
49e8edf6cefd68aea18d2799147b6f44ec44d7f1
|
/robot_learning/scripts/convert_odom.py
|
66d2ab337d1cc726811b24759145667e0127ac61
|
[] |
no_license
|
yycho0108/robot_learning
|
815a805bfbcf0ebd93164fa132780e03d7d0075b
|
b5ed0c13679c4ba3a569c020734308d3618ec57a
|
refs/heads/master
| 2020-04-01T22:37:37.211389
| 2018-12-18T08:59:44
| 2018-12-18T08:59:44
| 153,718,297
| 0
| 0
| null | 2018-10-19T02:55:43
| 2018-10-19T02:55:43
| null |
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
import rospy
import tf
import numpy as np
from nav_msgs.msg import Odometry
from tf_conversions import posemath as pm
class ConvertOdom(object):
    """Re-express /android/odom poses relative to the robot.

    Uses the static base_link -> android transform T to conjugate each
    incoming pose (T * pose * T^-1) and republishes it on 'cvt_odom'.
    """
    def __init__(self):
        # ~method is read but not used in this file — presumably for future variants.
        self.method_ = rospy.get_param('~method', default='tf')
        self.pub_tf_ = rospy.get_param('~pub_tf', default=False)
        # data
        self.odom_ = None       # last received Odometry message
        self.recv_ = False      # set by odom_cb, cleared once processed
        self.T_ = None          # 4x4 base_link -> android matrix (None until resolved)
        self.cvt_msg_ = Odometry()
        # create ROS handles
        self.tfl_ = tf.TransformListener()
        self.tfb_ = tf.TransformBroadcaster()
        self.sub_ = rospy.Subscriber('/android/odom', Odometry, self.odom_cb)
        self.pub_ = rospy.Publisher('cvt_odom', Odometry, queue_size=10)
        self.init_tf()
    def init_tf(self):
        """Resolve the static base_link -> android transform; retried by step()."""
        # obtain base_link -> android transform
        try:
            rospy.loginfo_throttle(1.0, 'Attempting to obtain static transform ...')
            #self.tfl_.waitForTransform('base_link', 'android', rospy.Duration(0.5))
            txn, qxn = self.tfl_.lookupTransform('base_link', 'android', rospy.Time(0))
            self.T_ = self.tfl_.fromTranslationRotation(txn,qxn)
            self.Ti_ = tf.transformations.inverse_matrix(self.T_)
        except tf.Exception as e:
            # T_ stays None, so step() will call init_tf() again on the next tick.
            rospy.logerr_throttle(1.0, 'Obtaining Fixed Transform Failed : {}'.format(e))
    def odom_cb(self, msg):
        """Cache the newest Android odometry message and flag it for processing."""
        self.odom_ = msg
        self.recv_ = True
    def step(self):
        """Process at most one pending message; retries the transform lookup first."""
        if self.T_ is None:
            self.init_tf()
            return
        if self.recv_:
            self.recv_ = False
            pose = self.odom_.pose.pose
            T0 = pm.toMatrix(pm.fromMsg(pose)) # android_odom --> android
            # send odom -> base_link transform
            # Conjugation expresses the android pose in the robot's frame.
            T = tf.transformations.concatenate_matrices(self.T_,T0,self.Ti_)
            frame = pm.fromMatrix(T)
            if self.pub_tf_:
                txn, qxn = pm.toTf(frame)
                self.tfb_.sendTransform(txn, qxn, self.odom_.header.stamp,
                        'odom',
                        'base_link'
                        )
            # send as msg
            # TODO : does not deal with twist/covariance information
            msg = pm.toMsg(frame)
            self.cvt_msg_.pose.pose = pm.toMsg(frame)
            self.cvt_msg_.header.frame_id = 'map' # experimental
            self.cvt_msg_.header.stamp = self.odom_.header.stamp
            self.pub_.publish(self.cvt_msg_)
    def run(self):
        """Spin at 50 Hz until ROS shutdown."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            self.step()
            rate.sleep()
def main():
    """Initialize the 'convert_odom' node and run the converter until shutdown."""
    rospy.init_node('convert_odom')
    node = ConvertOdom()
    node.run()
if __name__ == "__main__":
    main()
|
[
"jchocholate@gmail.com"
] |
jchocholate@gmail.com
|
7ae6a828f59509f2a877757873e9e0e15f838175
|
c88e895ae9a08842987513d339cd98de735ee614
|
/tests/test_utils.py
|
631a24645c84d45814cf5874daefa65d94f256de
|
[
"Apache-2.0"
] |
permissive
|
tardyp/ramlfications
|
32cf25c4d5093424f7b71180c390bca777848dd8
|
cdc0d43d5e70a5f53fed9f25bff3529e89fff1af
|
refs/heads/master
| 2021-01-24T20:02:59.237045
| 2015-12-25T03:30:20
| 2015-12-25T03:30:20
| 49,712,338
| 0
| 0
| null | 2016-01-15T10:24:49
| 2016-01-15T10:24:48
| null |
UTF-8
|
Python
| false
| false
| 5,683
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Spotify AB
import sys
if sys.version_info[0] == 2:
from io import open
import json
import os
import tempfile
from mock import Mock, patch
import pytest
import xmltodict
from ramlfications import utils
from .base import UPDATE
# --- fixtures: canned XML inputs and expected output, all under UPDATE ---
@pytest.fixture(scope="session")
def downloaded_xml():
    # a saved copy of the IANA media-types registry download
    return os.path.join(UPDATE, "iana_mime_media_types.xml")
@pytest.fixture(scope="session")
def invalid_xml():
    # malformed XML used to exercise the MediaTypeError path
    return os.path.join(UPDATE, "invalid_iana_download.xml")
@pytest.fixture(scope="session")
def no_data_xml():
    # structurally empty payload, also expected to raise MediaTypeError
    return os.path.join(UPDATE, "no_data.xml")
@pytest.fixture(scope="session")
def expected_data():
    # JSON list of media types the registry XML should reduce to
    expected_json = os.path.join(UPDATE, "expected_mime_types.json")
    with open(expected_json, "r", encoding="UTF-8") as f:
        return json.load(f)
@pytest.fixture(scope="session")
def parsed_xml(downloaded_xml):
    # the downloaded registry pre-parsed into an xmltodict structure
    with open(downloaded_xml, "r", encoding="UTF-8") as f:
        data = f.read()
    return xmltodict.parse(data)
# _xml_to_dict: happy path returns a dict
def test_xml_to_dict(downloaded_xml):
    with open(downloaded_xml, "r", encoding="UTF-8") as f:
        data = f.read()
    xml_data = utils._xml_to_dict(data)
    assert xml_data is not None
    assert isinstance(xml_data, dict)
# _xml_to_dict: empty payload raises MediaTypeError
def test_xml_to_dict_no_data(no_data_xml):
    with pytest.raises(utils.MediaTypeError) as e:
        with open(no_data_xml, "r", encoding="UTF-8") as f:
            data = f.read()
        utils._xml_to_dict(data)
    msg = "Error parsing XML: "
    assert msg in e.value.args[0]
# _xml_to_dict: malformed XML raises MediaTypeError
def test_xml_to_dict_invalid(invalid_xml):
    with pytest.raises(utils.MediaTypeError) as e:
        with open(invalid_xml, "r", encoding="UTF-8") as f:
            data = f.read()
        utils._xml_to_dict(data)
    msg = "Error parsing XML: "
    assert msg in e.value.args[0]
# _parse_xml_data: reduces the registry to the expected media-type list
def test_parse_xml_data(parsed_xml, expected_data):
    result = utils._parse_xml_data(parsed_xml)
    assert result == expected_data
    assert len(result) == len(expected_data)
@pytest.fixture(scope="session")
def incorrect_registry_count():
    # registry dump with fewer <registry> nodes than the expected 9
    xml_file = os.path.join(UPDATE, "unexpected_registry_count.xml")
    with open(xml_file, "r", encoding="UTF-8") as f:
        data = f.read()
    return xmltodict.parse(data)
# _parse_xml_data: wrong registry count is rejected with an exact message
def test_parse_xml_data_incorrect_reg(incorrect_registry_count):
    with pytest.raises(utils.MediaTypeError) as e:
        utils._parse_xml_data(incorrect_registry_count)
    msg = ("Expected 9 registries but parsed 2",)
    assert e.value.args == msg
@pytest.fixture(scope="session")
def no_registries():
    # dump with no <registry> nodes at all
    xml_file = os.path.join(UPDATE, "no_registries.xml")
    with open(xml_file, "r", encoding="UTF-8") as f:
        data = f.read()
    return xmltodict.parse(data)
def test_parse_xml_data_no_reg(no_registries):
    with pytest.raises(utils.MediaTypeError) as e:
        utils._parse_xml_data(no_registries)
    msg = ("No registries found to parse.",)
    assert e.value.args == msg
# download helpers: network clients are replaced with mocks
def test_requests_download_xml(downloaded_xml):
    utils.requests = Mock()
    with open(downloaded_xml, "r", encoding="UTF-8") as xml:
        expected = xml.read()
    utils.requests.get.return_value.text = expected
    results = utils._requests_download(utils.IANA_URL)
    assert results == expected
def test_urllib_download(downloaded_xml):
    utils.urllib = Mock()
    with open(downloaded_xml, "r", encoding="UTF-8") as xml:
        utils.urllib.urlopen.return_value = xml
        results = utils._urllib_download(utils.IANA_URL)
    with open(downloaded_xml, "r", encoding="UTF-8") as xml:
        assert results == xml.read()
# dispatch: SECURE_DOWNLOAD/URLLIB flags pick urllib vs requests downloader
@patch("ramlfications.utils._parse_xml_data")
@patch("ramlfications.utils._xml_to_dict")
@patch("ramlfications.utils._save_updated_mime_types")
def test_insecure_download_urllib_flag(_a, _b, _c, mocker, monkeypatch):
    monkeypatch.setattr(utils, "SECURE_DOWNLOAD", False)
    monkeypatch.setattr(utils, "URLLIB", True)
    utils.requests = Mock()
    mocker.patch("ramlfications.utils._urllib_download")
    utils.update_mime_types()
    utils._urllib_download.assert_called_once()
    mocker.stopall()
@patch("ramlfications.utils._xml_to_dict")
@patch("ramlfications.utils._parse_xml_data")
@patch("ramlfications.utils._save_updated_mime_types")
def test_secure_download_requests_flag(_a, _b_, _c, mocker, monkeypatch):
    monkeypatch.setattr(utils, "SECURE_DOWNLOAD", True)
    monkeypatch.setattr(utils, "URLLIB", False)
    utils.urllib = Mock()
    mocker.patch("ramlfications.utils._requests_download")
    utils.update_mime_types()
    utils._requests_download.assert_called_once()
    mocker.stopall()
# end-to-end update path with every collaborator patched out
@patch("ramlfications.utils._xml_to_dict")
@patch("ramlfications.utils._parse_xml_data")
@patch("ramlfications.utils._requests_download")
@patch("ramlfications.utils._urllib_download")
@patch("ramlfications.utils._save_updated_mime_types")
def test_update_mime_types(_a, _b, _c, _d, _e, downloaded_xml):
    utils.requests = Mock()
    with open(downloaded_xml, "r", encoding="UTF-8") as raw_data:
        utils.update_mime_types()
        utils._requests_download.assert_called_once()
        utils._requests_download.return_value = raw_data.read()
        utils._xml_to_dict.assert_called_once()
        utils._parse_xml_data.assert_called_once()
        utils._save_updated_mime_types.assert_called_once()
# _save_updated_mime_types: round-trips content through a temp JSON file
def test_save_updated_mime_types():
    content = ["foo/bar", "bar/baz"]
    temp_output = tempfile.mkstemp()[1]
    utils._save_updated_mime_types(temp_output, content)
    result = open(temp_output, "r", encoding="UTF-8").read()
    result = json.loads(result)
    assert result == content
    os.remove(temp_output)
|
[
"lynn@spotify.com"
] |
lynn@spotify.com
|
9bee7b08f46de36e23ae891f4c9864f8e80a81f6
|
865f33817e8989160b1d396ae96554134dc8c01a
|
/slack_bolt/workflows/step/utilities/__init__.py
|
eedc9c85efae340228b286a9ed898fdf72758dbf
|
[
"MIT"
] |
permissive
|
xocliwtb/bolt-python
|
4f075b945ef5b51e6e1e54f7b7afc36bd7894a91
|
11e202a14d6196585312e2d456e45267be9dd465
|
refs/heads/main
| 2023-03-23T21:26:50.256141
| 2021-03-24T12:22:50
| 2021-03-24T12:38:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
"""Utilities specific to workflow steps from apps.
In workflow step listeners, you can use a few specific listener/middleware arguments.
### `edit` listener
* `slack_bolt.workflows.step.utilities.configure` for building a modal view
### `save` listener
* `slack_bolt.workflows.step.utilities.update` for updating the step metadata
### `execute` listener
* `slack_bolt.workflows.step.utilities.fail` for notifying the execution failure to Slack
* `slack_bolt.workflows.step.utilities.complete` for notifying the execution completion to Slack
For asyncio-based apps, refer to the corresponding `async` prefixed ones.
"""
|
[
"seratch@gmail.com"
] |
seratch@gmail.com
|
dd49ab2283fcd7331e36188957f09dffafcce446
|
359cbc4e41d2321dbcaa610fb51132d5013514e5
|
/django_project/celery.py
|
893a0aa658718cef20b58ea05544cfd34ef59f12
|
[
"MIT"
] |
permissive
|
runmosta/product-database
|
780ff9d8dcf64af5fd191d1cb0ace94dd4c18d0e
|
1b46d6d0464a5196cf9f70c060fe9e9ae614f8ea
|
refs/heads/master
| 2021-01-22T23:44:35.711366
| 2017-03-16T12:12:49
| 2017-03-16T12:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
from __future__ import absolute_import
import logging
import os
import celery
import raven
from celery import states
from django.conf import settings
from django.core.cache import cache
from raven.contrib.celery import register_signal, register_logger_signal
class Celery(celery.Celery):
    """Celery app subclass that wires up Sentry (raven) reporting
    when settings.PDB_ENABLE_SENTRY is set."""
    def on_configure(self):
        if settings.PDB_ENABLE_SENTRY: # ignore for coverage
            client = raven.Client(settings.PDB_SENTRY_DSN)
            # tag Sentry events with the current git SHA of the deployment
            client.release = raven.fetch_git_sha(os.path.dirname(os.pardir))
            # register a custom filter to filter out duplicate logs
            register_logger_signal(client)
            # hook into the Celery error handler
            register_signal(client)
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project.settings')
app = Celery('product_db')
# read the Celery configuration from the Django settings object
app.config_from_object('django.conf:settings')
# lambda defers reading INSTALLED_APPS until Django settings are fully loaded
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
class TaskState(object):
    """
    states used for celery tasks
    """
    # Mirrors celery.states where possible; "processing" is an
    # app-specific intermediate state with no celery equivalent.
    SUCCESS = states.SUCCESS
    FAILED = states.FAILURE
    STARTED = states.STARTED
    PROCESSING = "processing"
    PENDING = states.PENDING
def is_worker_active():
    """Return True when at least one Celery worker reports registered tasks.

    Returns False (and logs an error) when no worker responds or the
    broker is unreachable.
    """
    try:
        i = app.control.inspect()
        if i.registered():
            return True
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; broker failures are expected here.
        logging.debug("worker inspection failed", exc_info=True)
    logging.error("Celery Worker process not available")
    return False
def get_meta_data_for_task(task_id):
    """Return the cached meta-data dict for a task id, or {} when unavailable."""
    try:
        return cache.get("task_meta_%s" % task_id, {})
    except Exception:
        # Any cache backend failure degrades to "no meta data".
        logging.debug("no meta information for task '%s' found" % task_id, exc_info=True)
        return {}
def set_meta_data_for_task(task_id, title, redirect_to=None, auto_redirect=True):
    """Store display meta-data for a task in the cache (8 hour expiry)."""
    payload = {"title": title, "auto_redirect": auto_redirect}
    # redirect target is only recorded when one was actually given
    if redirect_to:
        payload["redirect_to"] = redirect_to
    cache.set("task_meta_%s" % task_id, payload, 60 * 60 * 8)
@app.task
def hello_task():
    """Trivial demo task: logs a message and returns a static dict."""
    logging.info("Hello Task called")
    return {
        "hello": "task"
    }
|
[
"henry@codingnetworker.com"
] |
henry@codingnetworker.com
|
0ff06ec8d28402aab70bef401caaea5fd2e432f8
|
6ceea2578be0cbc1543be3649d0ad01dd55072aa
|
/src/fipy/viewers/matplotlibViewer/matplotlib1DViewer.py
|
a7697b5974aec5a137e5dbbf43f731801af8300d
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
regmi/fipy
|
57972add2cc8e6c04fda09ff2faca9a2c45ad19d
|
eb4aacf5a8e35cdb0e41beb0d79a93e7c8aacbad
|
refs/heads/master
| 2020-04-27T13:51:45.095692
| 2010-04-09T07:32:42
| 2010-04-09T07:32:42
| 602,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
#!/usr/bin/env python
## -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "matplotlib1DViewer.py"
#
# Author: Jonathan Guyer <guyer@nist.gov>
# Author: Daniel Wheeler <daniel.wheeler@nist.gov>
# Author: James Warren <jwarren@nist.gov>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
# See the file "license.terms" for information on usage and redistribution
# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
#
# ###################################################################
##
__docformat__ = 'restructuredtext'
from matplotlibViewer import _MatplotlibViewer
class Matplotlib1DViewer(_MatplotlibViewer):
    """
    Displays a y vs. x plot of one or more 1D `CellVariable` objects using
    Matplotlib_.

    .. _Matplotlib: http://matplotlib.sourceforge.net/
    """
    __doc__ += _MatplotlibViewer._test1D(viewer="Matplotlib1DViewer")
    def __init__(self, vars, title=None, xlog=False, ylog=False, limits={}, **kwlimits):
        """
        :Parameters:
          vars
            a `CellVariable` or tuple of `CellVariable` objects to plot
          title
            displayed at the top of the `Viewer` window
          xlog
            log scaling of x axis if `True`
          ylog
            log scaling of y axis if `True`
          limits : dict
            a (deprecated) alternative to limit keyword arguments
          xmin, xmax, datamin, datamax
            displayed range of data. Any limit set to
            a (default) value of `None` will autoscale.
            (*ymin* and *ymax* are synonyms for *datamin* and *datamax*).
        """
        # legacy `limits` dict entries are folded into the keyword limits;
        # the {} default is safe here because limits is only read, never mutated
        kwlimits.update(limits)
        _MatplotlibViewer.__init__(self, vars=vars, title=title, **kwlimits)
        import pylab
        # pick the pylab plotting command implied by the log-scaling flags
        if xlog and ylog:
            self.lines = [pylab.loglog(*datum) for datum in self._getData()]
        elif xlog:
            self.lines = [pylab.semilogx(*datum) for datum in self._getData()]
        elif ylog:
            self.lines = [pylab.semilogy(*datum) for datum in self._getData()]
        else:
            self.lines = [pylab.plot(*datum) for datum in self._getData()]
        pylab.legend([var.getName() for var in self.vars])
        pylab.xlim(xmin = self._getLimit('xmin'),
                   xmax = self._getLimit('xmax'))
        ymin = self._getLimit(('datamin', 'ymin'))
        ymax = self._getLimit(('datamax', 'ymax'))
        pylab.ylim(ymin=ymin, ymax=ymax)
        # missing y limits force a rescale on every redraw (see _plot)
        if ymax is None or ymin is None:
            import warnings
            warnings.warn("Matplotlib1DViewer efficiency is improved by setting the 'datamax' and 'datamin' keys", UserWarning, stacklevel=2)
    def _getData(self):
        """Return [[x, y], ...] pairs (cell centers vs. values), one per variable."""
        from fipy.tools.numerix import array
        return [[array(var.getMesh().getCellCenters()[0]), array(var)] for var in self.vars]
    def _getSuitableVars(self, vars):
        """Filter to 1D variables sharing the first variable's mesh; raise otherwise."""
        vars = [var for var in _MatplotlibViewer._getSuitableVars(self, vars) if var.getMesh().getDim() == 1]
        if len(vars) > 1:
            # all plotted variables must live on the same mesh as the first one
            vars = [var for var in vars if var.getMesh() is vars[0].getMesh()]
        if len(vars) == 0:
            from fipy.viewers import MeshDimensionError
            raise MeshDimensionError, "Can only plot 1D data"
        return vars
    def _plot(self):
        """Refresh each line's data and rescale the y axis on redraw."""
        ymin, ymax = self._autoscale(vars=self.vars,
                                     datamin=self._getLimit(('datamin', 'ymin')),
                                     datamax=self._getLimit(('datamax', 'ymax')))
        import pylab
        pylab.ylim(ymin=ymin, ymax=ymax)
        for line, datum in zip(self.lines, self._getData()):
            line[0].set_xdata(datum[0])
            line[0].set_ydata(datum[1])
line[0].set_ydata(datum[1])
if __name__ == "__main__":
    # run the doctest examples embedded in the class docstring
    import fipy.tests.doctestPlus
    fipy.tests.doctestPlus.execButNoTest()
|
[
"regmisk@gmail.com"
] |
regmisk@gmail.com
|
98da77c4e00f8af57fb0f7401c7164edec898362
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startPyquil3397.py
|
755036f7e7971cea85acc259f8c84b03de4679f6
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
# qubit number=4
# total number=46
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the auto-generated 4-qubit pyQuil program.

    The gate sequence is machine-generated (file header says 46 gates);
    the trailing "# number=N" comments are the generator's gate indices.
    """
    prog = Program() # circuit begin
    prog += H(3) # number=20
    prog += CZ(0,3) # number=21
    prog += H(3) # number=22
    prog += X(3) # number=13
    prog += H(3) # number=23
    prog += CZ(0,3) # number=24
    prog += H(3) # number=25
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += H(0) # number=5
    prog += Y(2) # number=18
    prog += CNOT(3,0) # number=40
    prog += Z(3) # number=41
    prog += H(0) # number=43
    prog += CZ(3,0) # number=44
    prog += H(0) # number=45
    prog += H(1) # number=6
    prog += H(2) # number=7
    prog += H(3) # number=8
    prog += H(0) # number=9
    prog += H(0) # number=33
    prog += CZ(2,0) # number=34
    prog += H(0) # number=35
    prog += H(1) # number=19
    prog += H(0) # number=15
    prog += CZ(2,0) # number=16
    prog += H(0) # number=17
    prog += RX(1.6838936623241292,2) # number=36
    prog += Y(1) # number=26
    prog += Y(1) # number=27
    prog += SWAP(1,0) # number=29
    prog += SWAP(1,0) # number=30
    prog += X(0) # number=31
    prog += CNOT(1,0) # number=37
    prog += X(0) # number=38
    prog += CNOT(1,0) # number=39
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count occurrences of each bitstring.

    Keeps the original (misspelled) public name so existing callers work.

    :param bitstrings: iterable of hashable values (measurement bitstrings)
    :return: dict mapping each value to its occurrence count
    """
    d = {}
    for l in bitstrings:
        # dict.get with a default replaces the original if/else branching
        d[l] = d.get(l, 0) + 1
    return d
if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    # run 1024 shots; per-qubit result arrays are stacked into per-shot rows
    results = qvm.run_and_measure(prog,1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # write the histogram of measured bitstrings to the data directory
    writefile = open("../data/startPyquil3397.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
0d26c8c18a0c57b0fef625f11d847b87fef5a932
|
3fd8958283dc1f0f3619b2835efb0c735eecb3db
|
/slixmpp/plugins/xep_0428/stanza.py
|
eb3c3daebf218a4437687b4360e7085aef660b7d
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
poezio/slixmpp
|
0e8ea7a5c12180f7806889a3a1d0c1136116c416
|
7a0fb970833c778ed50dcb49c5b7b4043d57b1e5
|
refs/heads/master
| 2023-08-13T15:05:51.996989
| 2022-10-03T08:20:19
| 2022-10-03T08:20:19
| 26,556,621
| 97
| 51
|
NOASSERTION
| 2023-08-01T08:20:03
| 2014-11-12T21:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
# Slixmpp: The Slick XMPP Library
# Copyright (C) 2020 Mathieu Pasquet <mathieui@mathieui.net>
# This file is part of Slixmpp.
# See the file LICENSE for copying permission
from slixmpp.stanza import Message
from slixmpp.xmlstream import (
ElementBase,
register_stanza_plugin,
)
NS = 'urn:xmpp:fallback:0'
class Fallback(ElementBase):
    """Stanza element for XEP-0428 fallback indication (urn:xmpp:fallback:0)."""
    namespace = NS
    name = 'fallback'
    plugin_attrib = 'fallback'
def register_plugins():
    """Attach the Fallback element as a stanza plugin on Message."""
    register_stanza_plugin(Message, Fallback)
|
[
"mathieui@mathieui.net"
] |
mathieui@mathieui.net
|
3b51948f1918b55d1e39c110991ffe7fe8740878
|
244189d49a3967b4b002af73f40ca8e8064c4771
|
/modules/payloads/stages/windows/x64/vncinject.rb
|
8f466c207cac07f11e4e57704d91a5b147ecb2c1
|
[
"MIT"
] |
permissive
|
darkcode357/thg-framework
|
7540609fb79619bdc12bd98664976d51c79816aa
|
c1c3bd748aac85a8c75e52486ae608981a69d93a
|
refs/heads/master
| 2023-03-01T05:06:51.399919
| 2021-06-01T14:00:32
| 2021-06-01T14:00:32
| 262,925,227
| 11
| 6
|
NOASSERTION
| 2023-02-10T23:11:02
| 2020-05-11T03:04:05
|
Python
|
UTF-8
|
Python
| false
| false
| 932
|
rb
|
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'msf/core/payload/windows/x64/reflectivedllinject'
require 'msf/base/sessions/vncinject'
require 'msf/base/sessions/vncinject_options'
###
#
# Injects the VNC server DLL (via Reflective Dll Injection) and runs it over the established connection.
#
###
module MetasploitModule
  # Reflective DLL injection machinery for 64-bit Windows targets.
  include Msf::Payload::Windows::ReflectiveDllInject_x64
  # Standard VNC connection/session options shared by the vncinject payloads.
  include Msf::Sessions::VncInjectOptions
  def initialize(info = {})
    super(update_info(info,
      'Name' => 'Windows x64 VNC Server (Reflective Injection)',
      'Description' => 'Inject a VNC Dll via a reflective loader (Windows x64) (staged)',
      'Author' => [ 'sf' ],
      'Session' => Msf::Sessions::VncInject ))
  end
  # Path of the VNC DLL that gets reflectively loaded into the target process.
  def library_path
    File.join(Msf::Config.data_directory, "vncdll.x64.dll")
  end
end
|
[
"darkocde357@gmail.com"
] |
darkocde357@gmail.com
|
95ca5345a8e610b9a95855f36668531f02c28e8c
|
fab39aa4d1317bb43bc11ce39a3bb53295ad92da
|
/examples/torch/object_detection/layers/extensions/__init__.py
|
17620e92e9100990a5186deae8331de1fc7e059b
|
[
"Apache-2.0"
] |
permissive
|
dupeljan/nncf
|
8cdce27f25f01ce8e611f15e1dc3036fb8548d6e
|
0abfd7103ca212888a946ba4d0fbdb9d436fdaff
|
refs/heads/develop
| 2023-06-22T00:10:46.611884
| 2021-07-22T10:32:11
| 2021-07-22T10:32:11
| 388,719,455
| 0
| 0
|
Apache-2.0
| 2021-07-23T07:46:15
| 2021-07-23T07:43:43
| null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
import os.path
import torch
from torch.utils.cpp_extension import load
from nncf.torch.extensions import CudaNotAvailableStub
# directory containing this file and the C++/CUDA sources listed below
ext_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
if torch.cuda.is_available():
    # JIT-compile the NMS extension at import time (cached by torch after first build)
    EXTENSIONS = load(
        'extensions', [
            os.path.join(ext_dir, 'extensions.cpp'),
            os.path.join(ext_dir, 'nms/nms.cpp'),
            os.path.join(ext_dir, 'nms/nms_kernel.cu'),
        ],
        verbose=False
    )
else:
    # stub substituted when CUDA is unavailable
    EXTENSIONS = CudaNotAvailableStub
|
[
"noreply@github.com"
] |
dupeljan.noreply@github.com
|
d76f494dd6a71b2112a8888bb41eff64b231b4da
|
e02806138dfef3763f1f93bf8cb4990bd7502c06
|
/misc/convert_to_bilou.py
|
89959771d0c74c0a23220a553aec22203eeaaf4a
|
[
"Apache-2.0"
] |
permissive
|
ELS-RD/anonymisation
|
b40a809865d943d4abdceb74cc49ac03209373a9
|
0b02b4e3069729673e0397a1dbbc50ae9612d90f
|
refs/heads/master
| 2021-06-22T07:14:26.082920
| 2020-11-28T20:52:50
| 2020-11-28T20:52:50
| 139,612,256
| 87
| 21
|
Apache-2.0
| 2020-10-29T17:23:18
| 2018-07-03T16:48:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,862
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, Optional, Tuple
from spacy.gold import GoldParse, biluo_tags_from_offsets
from spacy.tokens.doc import Doc
from xml_extractions.extract_node_values import Offset
no_action_bilou = None
unknown_type_name = "UNKNOWN"
def convert_bilou_with_missing_action(doc: Doc, offsets: List[Tuple[int, int, str]]) -> List[Optional[str]]:
    """
    Map entity offsets to BILOU tags, blanking out UNKNOWN-typed tokens.

    Tokens whose tag contains the UNKNOWN label are replaced by the missing
    value so spaCy applies no NER loss to them.
    https://spacy.io/api/goldparse#biluo_tags_from_offsets
    :param doc: text tokenized by Spacy
    :param offsets: original offsets
    :return: list of BILOU types
    """
    tags = biluo_tags_from_offsets(doc, offsets)
    cleaned: List[Optional[str]] = []
    for tag in tags:
        cleaned.append(no_action_bilou if unknown_type_name in tag else tag)
    return cleaned
def convert_unknown_bilou(doc: Doc, offsets: List[Offset]) -> GoldParse:
    """
    Build a GoldParse from entity offsets, with UNKNOWN labels mapped to
    spaCy missing values.
    https://spacy.io/api/goldparse#biluo_tags_from_offsets
    :param doc: spacy tokenized text
    :param offsets: discovered offsets
    :return: GoldParse for the doc
    """
    as_tuples = [offset.to_tuple() for offset in offsets]
    tags = convert_bilou_with_missing_action(doc=doc, offsets=as_tuples)
    return GoldParse(doc, entities=tags)
def convert_unknown_bilou_bulk(docs: List[Doc], offsets: List[List[Offset]]) -> List[GoldParse]:
    """
    Convert list of entity offsets to list of GoldParse annotations,
    with UNKNOWN labels mapped to Spacy missing values.
    https://spacy.io/api/goldparse#biluo_tags_from_offsets
    :param docs: spacy tokenized texts
    :param offsets: discovered offsets, parallel to docs
    :return: list of GoldParse, one per doc
    """
    # comprehension replaces the original append loop (same pairing via zip)
    return [convert_unknown_bilou(doc=doc, offsets=current_offsets)
            for doc, current_offsets in zip(docs, offsets)]
|
[
"pommedeterresautee@msn.com"
] |
pommedeterresautee@msn.com
|
ac2a955bb3b15a542b3ea4cc42c726a3a191c310
|
beb3ee10276cbbb9c3b32bcebc38427816d7302a
|
/cmc/conf/prod.py
|
b0ab32587a9624ce29e6a918b6951a43066fc745
|
[
"MIT"
] |
permissive
|
einsfr/cmc
|
df147ebc06b23801cb0f9982f49a1837a31dd2ff
|
3735c0182b552757171f4e38af2463a9059a60da
|
refs/heads/master
| 2021-01-12T09:29:17.865237
| 2016-12-14T14:36:07
| 2016-12-14T14:36:07
| 76,158,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# Hosts this deployment will serve; empty here, so values must come from elsewhere.
ALLOWED_HOSTS = []
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Production API renders JSON only (no browsable HTML API).
REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    )
}
# NOTE(review): DebugToolbarMiddleware is listed even though DEBUG is False —
# confirm it is intentional in the production middleware stack.
MIDDLEWARE = [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
|
[
"einsfr@users.noreply.github.com"
] |
einsfr@users.noreply.github.com
|
c1b3eed09a9e40c8dd977b55adb93eb45df42db9
|
0cdeba10a9e29fb6abc324a80605770edf887cd1
|
/代理/11111.py
|
d121342e3f0b6238a2d3b6dd44dc5e7d9b0407cf
|
[] |
no_license
|
snailuncle/spider01
|
721e655771f17cbbb47fac8fa7771325a6f86770
|
49c83be01cbd3ef8ebabb83fb6409ef2c10692bc
|
refs/heads/master
| 2021-04-12T08:22:25.337779
| 2018-04-30T05:34:55
| 2018-04-30T05:34:55
| 126,051,990
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,580
|
py
|
# -*- coding:UTF-8 -*-
from bs4 import BeautifulSoup
# from selenium import webdriver
import subprocess as sp
from lxml import etree
import requests
import random
import re
"""
函数说明:获取IP代理
Parameters:
page - 高匿代理页数,默认获取第一页
Returns:
proxys_list - 代理列表
Modify:
2017-05-27
"""
def get_proxys(page = 1):
#requests的Session可以自动保持cookie,不需要自己维护cookie内容
S = requests.Session()
#西祠代理高匿IP地址
target_url = 'http://www.xicidaili.com/nn/%d' % page
#完善的headers
target_headers = {
'Accept':'*/*'
'Accept-Encoding':'gzip, deflate'
'Accept-Language':'zh-CN,zh;q=0.9'
'Connection':'keep-alive'
'Host':'60.221.213.24'
'Origin':'null'
'Referer':'http://www.iqiyi.com/v_19rroheauw.html'
'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.32 Safari/537.36'
}
#get请求
target_response = S.get(url=target_url, headers = target_headers)
#utf-8编码
target_response.encoding = 'utf-8'
#获取网页信息
target_html = target_response.text
#获取id为ip_list的table
bf1_ip_list = BeautifulSoup(target_html, 'lxml')
bf2_ip_list = BeautifulSoup(str(bf1_ip_list.find_all(id = 'ip_list')), 'lxml')
ip_list_info = bf2_ip_list.table.contents
#存储代理的列表
proxys_list = []
#爬取每个代理信息
for index in range(len(ip_list_info)):
if index % 2 == 1 and index != 1:
dom = etree.HTML(str(ip_list_info[index]))
ip = dom.xpath('//td[2]')
port = dom.xpath('//td[3]')
protocol = dom.xpath('//td[6]')
proxys_list.append(protocol[0].text.lower() + '#' + ip[0].text + '#' + port[0].text)
#返回代理列表
return proxys_list
"""
函数说明:检查代理IP的连通性
Parameters:
ip - 代理的ip地址
lose_time - 匹配丢包数
waste_time - 匹配平均时间
Returns:
average_time - 代理ip平均耗时
Modify:
2017-05-27
"""
def check_ip(ip, lose_time, waste_time):
#命令 -n 要发送的回显请求数 -w 等待每次回复的超时时间(毫秒)
cmd = "ping -n 3 -w 3 %s"
#执行命令
p = sp.Popen(cmd % ip, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
#获得返回结果并解码
out = p.stdout.read().decode("gbk")
#丢包数
lose_time = lose_time.findall(out)
#当匹配到丢失包信息失败,默认为三次请求全部丢包,丢包数lose赋值为3
if len(lose_time) == 0:
lose = 3
else:
lose = int(lose_time[0])
#如果丢包数目大于2个,则认为连接超时,返回平均耗时1000ms
if lose > 2:
#返回False
return 1000
#如果丢包数目小于等于2个,获取平均耗时的时间
else:
#平均时间
average = waste_time.findall(out)
#当匹配耗时时间信息失败,默认三次请求严重超时,返回平均好使1000ms
if len(average) == 0:
return 1000
else:
#
average_time = int(average[0])
#返回平均耗时
return average_time
"""
函数说明:初始化正则表达式
Parameters:
无
Returns:
lose_time - 匹配丢包数
waste_time - 匹配平均时间
Modify:
2017-05-27
"""
def initpattern():
#匹配丢包数
lose_time = re.compile(r"丢失 = (\d+)", re.IGNORECASE)
#匹配平均时间
waste_time = re.compile(r"平均 = (\d+)ms", re.IGNORECASE)
return lose_time, waste_time
def proxy_get():
# if __name__ == '__main__':
#初始化正则表达式
lose_time, waste_time = initpattern()
#获取IP代理
proxys_list = get_proxys(1)
#如果平均时间超过200ms重新选取ip
while True:
#从100个IP中随机选取一个IP作为代理进行访问
proxy = random.choice(proxys_list)
split_proxy = proxy.split('#')
#获取IP
ip = split_proxy[1]
#检查ip
average_time = check_ip(ip, lose_time, waste_time)
if average_time > 200:
#去掉不能使用的IP
proxys_list.remove(proxy)
print("ip连接超时, 重新获取中!")
if average_time < 200:
break
#去掉已经使用的IP
proxys_list.remove(proxy)
proxy_dict = {split_proxy[0]:split_proxy[1] + ':' + split_proxy[2]}
# print("使用代理:", proxy_dict)
# {'https': '117.25.189.249:20814'}
return proxy_dict
|
[
"1789500304@qq.com"
] |
1789500304@qq.com
|
05e0ad0c9844453bca4fa2dedf0a25e400f0c2ab
|
463087b1f650928385c711390fb8ead19e63cc76
|
/rooms/views.py
|
b904a80e7905eb776412a64a4cfc97f8527b2032
|
[] |
no_license
|
sbtiffanykim/awesome-api
|
81a81b45c793b393512fd1b1a01085fd96c8af51
|
b1f4c8ef0caf32f3fe2522d3011f68dcfd34065b
|
refs/heads/master
| 2023-02-05T19:50:48.990059
| 2020-12-21T14:14:29
| 2020-12-21T14:14:29
| 319,961,409
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Room
from .serializers import RoomSerializer
class RoomsView(APIView):
def get(self, request):
rooms = Room.objects.all()
serializer = RoomSerializer(rooms, many=True).data
return Response(serializer)
def post(self, request):
if not request.user.is_authenticated:
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = RoomSerializer(data=request.data)
if serializer.is_valid():
room = serializer.save(user=request.user)
room_serializer = RoomSerializer(room).data
return Response(data=room_serializer, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class RoomView(APIView):
def get_room(self, pk):
try:
room = Room.objects.get(pk=pk)
return room
except Room.DoesNotExist:
return None
def get(self, request, pk):
room = self.get_room(pk)
if room is not None:
serializer = RoomSerializer(room).data
return Response(serializer)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
# to update the room
def put(self, request, pk):
room = self.get_room(pk)
# instance == room & data == request.data
if room is not None:
if room.user != request.user:
return Response(status=status.HTTP_403_FORBIDDEN)
serializer = RoomSerializer(room, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(RoomSerializer(room).data)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response()
else:
return Response(status=status.HTTP_404_NOT_FOUND)
def delete(self, request, pk):
room = self.get_room(pk)
if room is not None:
if room.user != request.user:
return Response(status=status.HTTP_403_FORBIDDEN)
room.delete()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
|
[
"sbtiffanykim@gmail.com"
] |
sbtiffanykim@gmail.com
|
6a501f24bce4742b2280fbad8b6f066b1a1cd36b
|
02863fb122e736e1d1193c01270a3731dd114f79
|
/venv/Lib/site-packages/tensorflow/keras/applications/xception/__init__.py
|
9427a25842edb2883be8696835bd990586d9ae2a
|
[] |
no_license
|
umedsondoniyor/PredictiveMaintenance
|
ee9dd4fe8d3366be4c5b192b4275f23903dbd285
|
88d8184cc2a958aa5feb9b55a0d5d9b6de36c22e
|
refs/heads/master
| 2021-06-18T23:42:24.901395
| 2021-03-03T15:36:55
| 2021-03-03T15:36:55
| 142,778,437
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable
Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import print_function
from tensorflow.python.keras.applications import Xception
from tensorflow.python.keras.applications.densenet import decode_predictions
from tensorflow.python.keras.applications.xception import preprocess_input
del print_function
|
[
"umedzhonizbasarov@gmail.com"
] |
umedzhonizbasarov@gmail.com
|
af8fe2f47bb526cbdc6867a4f95b7d42e1af9a5a
|
05e61f3db737b7327849d1301b1ed3ba38028a9a
|
/seata/sqlparser/mysql/antlr4/value/table.py.txt
|
40d03cc824fd8557e349c784ce853ac5a45ad43c
|
[] |
no_license
|
JoshYuJump/seata-python
|
d56b2d27593ce92c39640c45b6a5ef1b27d0ce84
|
9fe12dd3ddea0903db7c52bd6810df2da8012417
|
refs/heads/master
| 2023-07-14T16:06:59.165247
| 2021-08-20T01:16:06
| 2021-08-22T13:19:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,808
|
txt
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
"""
from seata.sqlparser.mysql.antlr4.value.value import IdentifierValue
class OwnerSegment:
def __init__(self, start_index: int, stop_index: int, identifier: IdentifierValue):
self.start_index = start_index
self.stop_index = stop_index
self.identifier = identifier
class AliasSegment:
def __init__(self, start_index: int, stop_index: int, identifier: IdentifierValue):
self.start_index = start_index
self.stop_index = stop_index
self.identifier = identifier
class TableNameSegment:
def __init__(self, start_index: int, stop_index: int, identifier: IdentifierValue):
self.start_index = start_index
self.stop_index = stop_index
self.identifier = identifier
class SimpleTableSegment:
def __init__(self, table_name: TableNameSegment, owner: OwnerSegment = None, alias: AliasSegment = None):
self.table_name = table_name
self.owner = owner
self.alias = alias
def get_start_index(self):
if self.owner is None:
return self.table_name.start_index
else:
return self.owner.stop_index
def get_stop_index(self):
if self.alias is None:
return self.table_name.stop_index
else:
return self.alias.stop_index
def get_alias(self):
if self.alias is None:
return ""
else:
self.alias.identifier.value
class ColumnSegment:
def __init__(self, start_index: int, stop_index: int, identifier: IdentifierValue, owner: OwnerSegment = None):
self.start_index = start_index
self.stop_index = stop_index
self.identifier = identifier
self.owner = owner
class IndexSegment:
def __init__(self, start_index: int, stop_index: int, identifier: IdentifierValue, owner: OwnerSegment = None):
self.start_index = start_index
self.stop_index = stop_index
self.identifier = identifier
self.owner = owner
class Interval:
def __init__(self, start_index: int, stop_index: int):
self.start_index = start_index
self.stop_index = stop_index
class ExpressionSegment:
pass
class SubquerySegment:
pass
class MySQLSelectStatement:
def __init__(self):
self.order_by = None
self.limit = None
class ListExpression:
pass
class InExpression:
def __init__(self, start_index: int, stop_index: int, left: ExpressionSegment, right: ListExpression, not_):
self.start_index = start_index
self.stop_index = stop_index
self.left = left
self.right = right
self.not_ = not_
class BetweenExpression:
def __init__(self, start_index: int, stop_index: int, left: ExpressionSegment, between: ExpressionSegment, and_,
not_):
self.start_index = start_index
self.stop_index = stop_index
self.left = left
self.between = between
self.and_ = and_
self.not_ = not_
class ExistsSubqueryExpression:
def __init__(self, start_index: int, stop_index: int, subquery_segment: SubquerySegment):
self.start_index = start_index
self.stop_index = stop_index
self.subquery_segment = subquery_segment
class ParameterMarkerExpressionSegment:
def __init__(self, start_index: int, stop_index: int, value: str):
self.start_index = start_index
self.stop_index = stop_index
self.value = value
class CommonExpressionSegment:
def __init__(self, start_index: int, stop_index: int, text: str):
self.start_index = start_index
self.stop_index = stop_index
self.text = text
class LiteralExpressionSegment:
def __init__(self, start_index: int, stop_index: int, value: str):
self.start_index = start_index
self.stop_index = stop_index
self.value = value
class SubqueryExpressionSegment:
pass
class LockSegment:
def __init__(self, start_index: int, stop_index: int):
self.start_index = start_index
self.stop_index = stop_index
self.table = []
class NotExpression:
def __init__(self, start_index: int, stop_index: int, expression: ExpressionSegment):
self.start_index = start_index
self.stop_index = stop_index
self.expression = expression
class BinaryOperationExpression:
def __init__(self, start_index: int, stop_index: int,
left: ExpressionSegment, right: ExpressionSegment,
operator: str, text: str):
self.start_index = start_index
self.stop_index = stop_index
self.left = left
self.right = right
self.operator = operator
self.text = text
"""
|
[
"jsbxyyx@163.com"
] |
jsbxyyx@163.com
|
e3f687bcd124488b172bd73651a8d9237a9fccea
|
1c790b0adc648ff466913cf4aed28ace905357ff
|
/python/lbann/modules/graph/sparse/NNConv.py
|
8a8e61aeea88d0cbab3e6c1be5a8f9e717a03a80
|
[
"Apache-2.0"
] |
permissive
|
LLNL/lbann
|
04d5fdf443d6b467be4fa91446d40b620eade765
|
e8cf85eed2acbd3383892bf7cb2d88b44c194f4f
|
refs/heads/develop
| 2023-08-23T18:59:29.075981
| 2023-08-22T22:16:48
| 2023-08-22T22:16:48
| 58,576,874
| 225
| 87
|
NOASSERTION
| 2023-09-11T22:43:32
| 2016-05-11T20:04:20
|
C++
|
UTF-8
|
Python
| false
| false
| 7,203
|
py
|
import lbann
from lbann.modules import Module, ChannelwiseFullyConnectedModule
class NNConv(Module):
"""Details of the kernel is available at:
"Neural Message Passing for Quantum Chemistry"
https://arxiv.org/abs/1704.01212
"""
global_count = 0
def __init__(self,
sequential_nn,
num_nodes,
num_edges,
input_channels,
output_channels,
edge_input_channels,
activation=lbann.Relu,
name=None,
parallel_strategy={}):
"""Inititalize the edge conditioned graph kernel with edge data
represented with pseudo-COO format. The reduction over edge
features are performed via the scatter layer
The update function of the kernel is:
.. math::
X^{\prime}_{i} = \Theta x_i + \sum_{j \in \mathcal{N(i)}}x_j \cdot h_{\Theta}(e_{i,j})
where :math:`h_{\mathbf{\Theta}}` denotes a channel-wise NN module
Args:
sequential_nn ([Module] or (Module)): A list or tuple of layer
modules for updating the
edge feature matrix
num_nodes (int): Number of vertices of each graph
(max number in the batch padded by 0)
num_edges (int): Number of edges of each graph
(max in the batch padded by 0)
output_channels (int): The output size of each node feature after
transformed with learnable weights
activation (type): The activation function of the node features
name (str): Default name of the layer is NN_{number}
parallel_strategy (dict): Data partitioning scheme.
"""
NNConv.global_count += 1
self.name = (name
if name
else 'NNConv_{}'.format(NNConv.global_count))
self.output_channels = output_channels
self.input_channels = input_channels
self.num_nodes = num_nodes
self.num_edges = num_edges
self.edge_input_channels = edge_input_channels
self.node_activation = activation
self.parallel_strategy = parallel_strategy
self.node_nn = \
ChannelwiseFullyConnectedModule(self.output_channels,
bias=False,
activation=self.node_activation,
name=self.name+"_node_weights",
parallel_strategy=self.parallel_strategy)
self.edge_nn = sequential_nn
def message(self,
node_features,
neighbor_features,
edge_features):
"""Update node features and edge features. The Message stage of the
convolution.
Args:
node_features (Layer); A 2D layer of node features of
shape (num_nodes, input_channels)
neighbor_features (Layer): A 3D layer of node features of
shape (num_edges, 1, input_channels)
edge_features (Layer): A 2D layer of edge features of
shape (num_edges, edge_features)
Returns:
(Layer, Layer): Returns the updated node features and the messages
for each node.
"""
## These reshapes do not change the nn output but enables channelwise partitioning
## for distconv channelwiseFC natively
node_features = lbann.Reshape(node_features, dims=[self.num_nodes, 1, self.input_channels])
edge_features = lbann.Reshape(edge_features, dims=[self.num_edges, 1, self.edge_input_channels])
updated_node_features = self.node_nn(node_features)
edge_update = None
for layer in self.edge_nn:
if edge_update:
edge_update = layer(edge_update)
else:
edge_update = layer(edge_features)
edge_values = \
lbann.Reshape(edge_update,
dims=[self.num_edges,
self.input_channels,
self.output_channels],
name=self.name+"_edge_mat_reshape")
edge_values = \
lbann.MatMul(neighbor_features, edge_values)
return updated_node_features, edge_values
def aggregate(self,
edge_values,
edge_indices):
"""Aggregate the messages from the neighbors of the nodes
Args:
edge_values (Layer): A layer of edge features of
shape (num_edges, edge_features)
edge_indices (Layer): A 1D layer of node features of
shape (num_edges).
The indices used for reduction
Returns:
(Layer): A 2D layer of updated node features
"""
node_feature_dims = [self.num_nodes , self.output_channels]
edge_feature_dims = [self.num_edges , self.output_channels]
edge_values = lbann.Reshape(edge_values,
dims=edge_feature_dims,
name=self.name+"_neighbor_features")
edge_reduce = lbann.Scatter(edge_values,
edge_indices,
dims=node_feature_dims,
axis=0,
name=self.name+"_aggregate")
return edge_reduce
def forward(self,
node_features,
neighbor_features,
edge_features,
edge_index):
"""Apply NNConv layer.
Args:
node_features (Layer): A 2D layer of node features of
shape (num_nodes, input_channels)
neighbor_features (Layer): A 3D layer of node features of
shape (num_edges, 1, input_channels)
edge_features (Layer): A 2D layer of edge features of
shape (num_edges, edge_features)
edge_index (Layer): A 1D layer of node features of
shape (num_edges * output_channels).
The indices used for reduction
Returns:
(Layer): The output after NNConv. The output layer has the shape
(num_nodes, self.output_channels)
"""
updated_node_fts, neighbor_vals = self.message(node_features,
neighbor_features,
edge_features)
aggregated_fts = self.aggregate(neighbor_vals, edge_index)
update = lbann.Sum(updated_node_fts,
aggregated_fts,
name=self.name+"_updated_node_features")
return update
|
[
"noreply@github.com"
] |
LLNL.noreply@github.com
|
0f8e5e98a83027e12be70103548b1d15138d3074
|
d22fc5d683ea0ece1eb1623d81603b0c8a59da98
|
/zillowdb/packages/sfm/string_match.py
|
fa5058591fd9e0578a355e0fdaee05c42c1732c7
|
[
"MIT"
] |
permissive
|
MacHu-GWU/zillowdb-project
|
27117b00b330ca48af3972a2ae1beb28a98da5ca
|
020266257311fa667a3b5fcca15450eb00584aaf
|
refs/heads/master
| 2021-01-19T09:20:59.556094
| 2017-02-15T20:44:48
| 2017-02-15T20:44:48
| 82,104,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fuzzywuzzy import process
def choose_best(text, choice, criterion=None):
if criterion is None:
return choose_best(text, choice, criterion=0)
else:
res, confidence_level = process.extractOne(text, choice)
if confidence_level >= criterion:
return res
else:
return None
if __name__ == "__main__":
choice = ["Atlanta Falcons", "New Cow Jets", "Tom boy", "New York Giants", "Dallas Cowboys"]
text = "cowboy"
res = choose_best(text, choice)
print(res)
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
3004cbc484b80119507eb7f8708fb1370930251b
|
e530016831a0140a34a4f12a1517c66348d8109e
|
/backoffice/contabilidad/migrations/0001_initial.py
|
93e07b4ef45f9a21b63a091f7b6fa2a88a24fff1
|
[] |
no_license
|
ryujiin/storeserver
|
0e959c269b2d7885c142a10ca66139c6bb611522
|
043e76a6917d930f5c27840cf276ce36c7d991bf
|
refs/heads/master
| 2022-12-11T18:38:29.764044
| 2018-08-08T15:50:02
| 2018-08-08T15:50:02
| 136,964,735
| 0
| 0
| null | 2022-12-08T00:45:40
| 2018-06-11T18:22:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-24 23:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Egreso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fecha', models.DateTimeField()),
('monto', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('tipo', models.CharField(blank=True, max_length=100)),
('nota', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Ingreso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fecha', models.DateTimeField()),
('monto', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('tipo', models.CharField(blank=True, max_length=100)),
('nota', models.TextField(blank=True)),
],
),
]
|
[
"ryujiin22@gmail.com"
] |
ryujiin22@gmail.com
|
f121f2ea956a73bfc1479762b76d1592eea3e6d7
|
f900a9f48fe24c6a581bcb28ad1885cfe5743f80
|
/Chapter_15/random_walk.py
|
d7954dcedfa24837b832900dbc2bf853f8e2de9e
|
[] |
no_license
|
Anjali-225/PythonCrashCourse
|
76e63415e789f38cee019cd3ea155261ae2e8398
|
f9b9649fe0b758c04861dad4d88058d48837a365
|
refs/heads/master
| 2022-12-03T21:35:07.428613
| 2020-08-18T11:42:58
| 2020-08-18T11:42:58
| 288,430,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
from random import choice
class RandomWalk:
"""A class to generate random walks."""
def __init__(self, num_points=5000):
"""Initialize attributes of a walk."""
self.num_points = num_points
# All walks start at (0, 0).
self.x_values = [0]
self.y_values = [0]
def fill_walk(self):
"""Calculate all the points in the walk."""
# Keep taking steps until the walk reaches the desired length.
while len(self.x_values) < self.num_points:
# Decide which direction to go and how far to go in that direction.
x_direction = choice([1, -1])
x_distance = choice([0, 1, 2, 3, 4])
x_step = x_direction * x_distance
y_direction = choice([1, -1])
y_distance = choice([0, 1, 2, 3, 4])
y_step = y_direction * y_distance
# Reject moves that go nowhere.
if x_step == 0 and y_step == 0:
continue
# Calculate the new position.
x = self.x_values[-1] + x_step
y = self.y_values[-1] + y_step
self.x_values.append(x)
self.y_values.append(y)
|
[
"noreply@github.com"
] |
Anjali-225.noreply@github.com
|
74a826f628afae63a211d3f9a9edfa9a82abb7e8
|
06887e27c9c27e15a1295bab418998685fd5cf80
|
/app/__init__.py
|
8266d76887a19ddb077cce6c881729019ac89f71
|
[] |
no_license
|
Lenchok/struggle
|
7db061f2ad17a6191b88289c330ba386eaa460b7
|
04167c55faa1498373d682fb39e87b98ab187ec8
|
refs/heads/master
| 2020-03-25T06:29:14.291912
| 2018-08-04T06:57:35
| 2018-08-04T06:57:35
| 143,504,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from flask import Flask
from config import config
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
|
[
"you@example.com"
] |
you@example.com
|
b70fec8e0fe10789c193b948b2bd5e54d9877a0c
|
b088f48c2ac006b1c0afcbba13568fb143a7d248
|
/translate.py
|
edf27d8f257263de72871684abc2b437c38ed375
|
[] |
no_license
|
PanXiebit/transformer-tf2.0
|
e58ee3fbfe3f270be464ae56bccbca931e9458c6
|
8eb297a0a4ba650a0d0b5afba2cbb2e634b7d05b
|
refs/heads/master
| 2020-05-21T08:55:06.031628
| 2019-06-18T00:20:55
| 2019-06-18T00:20:55
| 185,985,287
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,599
|
py
|
from model_config import Config
from model.transformer import Transformer
import tensorflow as tf
from data_process.dataset import tokenizer_en, tokenizer_pt
from model.multi_head_attention import create_mask
config = Config()
transformer = Transformer(config.num_layers, config.d_model, config.num_heads, config.dff,
config.input_vocab_size, config.target_vocab_size, config.dropout_rate)
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(transformer=transformer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print ('Restored latest checkpoint {}'.format(ckpt_manager.latest_checkpoint))
MAX_LENGTH = 40
def evaluate(inp_sentence):
start_token = [tokenizer_pt.vocab_size]
end_token = [tokenizer_pt.vocab_size + 1]
# inp sentence is portuguese, hence adding the start and end token
inp_sentence = start_token + tokenizer_pt.encode(inp_sentence) + end_token
encoder_input = tf.expand_dims(inp_sentence, 0)
# as the target is english, the first word to the transformer should be the
# english start token.
decoder_input = [tokenizer_en.vocab_size]
output = tf.expand_dims(decoder_input, 0)
for i in range(MAX_LENGTH):
enc_padding_mask, combined_mask, dec_padding_mask = create_mask(
encoder_input, output)
# predictions.shape == (batch_size, seq_len, vocab_size)
predictions, attention_weights = transformer(encoder_input,
output,
False,
enc_padding_mask,
combined_mask,
dec_padding_mask)
# select the last word from the seq_len dimension
predictions = predictions[:, -1:, :] # (batch_size, 1, vocab_size)
predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
# return the result if the predicted_id is equal to the end token
if tf.equal(predicted_id, tokenizer_en.vocab_size + 1):
return tf.squeeze(output, axis=0), attention_weights
# concatentate the predicted_id to the output which is given to the decoder
# as its input.
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0), attention_weights
|
[
"ftdpanxie@gmail.com"
] |
ftdpanxie@gmail.com
|
15abc4d6eabadcb283773ef994d7e11300fcafa5
|
173aa19210509b0c84808a3b16c1d21047aa6b1a
|
/health_4.py
|
cf52cdecc02ea4975a166b38d9102e1eb9cf0b5e
|
[] |
no_license
|
wangleixin666/ML_healthAI
|
78ccea5ef824e63a2f7b1be40fbb385beac3643f
|
dcf41dd15d6f9d033f58c8c5384c786d8ab1ff72
|
refs/heads/master
| 2020-03-23T10:06:49.757535
| 2018-07-18T11:33:28
| 2018-07-18T11:33:28
| 141,426,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,982
|
py
|
# coding:utf-8
# 删除掉一些出现次数低,缺失比例大的字段,保留超过阈值的特征
import pandas as pd
def remain_feat(df, thresh):
exclude_feats = []
print('----------移除数据缺失多的字段-----------')
print('移除之前总的字段数量',len(df.columns))
num_rows = df.shape[0]
for c in df.columns:
num_missing = df[c].isnull().sum()
if num_missing == 0:
continue
missing_percent = num_missing / float(num_rows)
if missing_percent > thresh:
exclude_feats.append(c)
print(u"移除缺失数据的字段数量: %s" % len(exclude_feats))
# 保留超过阈值的特征
feats = []
for c in df.columns:
if c not in exclude_feats:
feats.append(c)
print(u'剩余的字段数量',len(feats))
return feats
train_data = pd.read_csv(r'D:/kaggle/health/temp_data/train_other.csv')
feature = remain_feat(train_data, thresh=0.999)
# 一共有40000行,出现200次以上吧,也就是0.005省去,出现几率大于0.995的特征228个
# 出现400次以上的0.99,只剩下了174个特征,除去5个label,1个vid,还剩下169个特征
# 0.992 190-6=184
print feature
train_data_delted = train_data[feature]
train_data_delted.to_csv(r'D:/kaggle/health/temp_data/train_other_deleted.csv', index=False, sep=',', encoding='utf-8')
"""
0.99
['004997', '0105', '0106', '0107', '0108', '0109', '0111', '0112', '019001', '019002',
'019003', '019017', '069002', '069003', '069004', '100005', '100007', '10012', '1124',
'1125', '131', '1343', '1349', '137', '1456', '184', '21A002', '269028', '269029', '269030',
'269031', '279001', '279002', '279003', '279005', '279006', '300006', '300007', '300069', '300070',
'300087', '300119', '300129', '300136', '31', '310', '311', '315', '316', '317', '3184', '319', '319100',
'33', '339125', '339126', '339128', '339129', '339130', '339131', '34', '35', '36', '369108', '37', '39',
'459154', '459155', '459156', '459158', '459159', '459161', '459206', '459207', '459211', '669001', '669004',
'669005', '699009', '709004', '709013', '709016', '709019', '709020', '709022', '709023', '709024', '709025',
'709043', '809004', '809005', '809006', '809007', '809008', '809009', '809010', '809011', '809012', '809013',
'809014', '809015', '809016', '809017', '809018', '809019', '809020', '809022', '809023', '809026', '809027',
'809028', '809029', '809030', '809031', '809032', '809033', '809034', '809035', '809037', '809038', '809039',
'809040', '809041', '809042', '809043', '809044', '809045', '809046', '809047', '809048', '809049', '809050',
'809051', '809053', '809054', '809055', '809056', '809057', '809058', '809059', '809060', '809061', '819008',
'819009', '819010', '819011', '819012', '819013', '819014', '819015', '819016', '819017', '819019', '819020',
'819021', '819022', '819023', '819024', '819025', '819026', '819027', '819028', '819029', '979010', '979024',
'979025', '979026', '979027']
0.999
['004997', '0105', '0106', '0107', '0108', '0109', '0111', '0112', '019001', '019002', '019003', '019004', '019007',
'019008', '019017', '019032', '019033', '019034', '019035', '019036', '019037', '019038', '019039', '019040', '019041',
'019042', '019043', '019044', '019045', '019046', '019047', '019048', '019049', '019050', '019051', '019052', '019053',
'019054', '019055', '019056', '019059', '019062', '069002', '069003', '069004', '069005', '069007', '069008', '069010',
'069023', '069044', '069049', '069050', '1', '10000', '100005', '100007', '10005', '10012', '10014', '1123', '1124',
'1125', '1136', '129056', '129057', '129058', '129079', '131', '134', '1343', '1346', '1349', '1359', '137', '1456',
'1461', '1471', '159053', '159063', '179176', '179177', '179178', '179226', '1816', '184', '1849', '186', '1915',
'1918', '199118', '20000', '21A002', '21A012', '21A021', '229080', '2392', '2451', '2452', '2453', '2454', '269028',
'269029', '269030', '269031', '269052', '269055', '269056', '269057', '269058', '279001', '279002', '279003', '279004',
'279005', '279006', '279028', '299168', '300003', '300006', '300007', '300015', '300022', '300037', '300038', '300039',
'300069', '300070', '300072', '300075', '300077', '300080', '300087', '300111', '300114', '300117', '300118', '300119',
'300129', '300136', '300146', '300166', '300168', '300169', '300170', '300171', '300172', '300174', '300175', '300176',
'300178', '300179', '300180', '300181', '300182', '300183', '300184', '300185', '300186', '300187', '300188', '300189',
'300190', '31', '310', '311', '315', '316', '317', '3184', '319', '319100', '319159', '319273', '3205', '3206', '3211',
'3217', '33', '3302', '339105', '339106', '339107', '339114', '339122', '339125', '339126', '339128', '339129', '339130',
'339131', '339135', '34', '346', '35', '36', '369007', '369008', '369085', '369098', '369108', '37', '378', '3814',
'3816', '3818', '39', '419008', '439011', '439015', '439016', '439035', '459116', '459117', '459141', '459154', '459155',
'459156', '459158', '459159', '459161', '459181', '459182', '459183', '459184', '459206', '459207', '459208', '459209',
'459211', '459327', '459329', '459330', '459331', '459332', '459333', '459336', '459337', '459338', '459340', '459342',
'509006', '509013', '539004', '559007', '559046', '559047', '669001', '669004', '669005', '669010', '669014', '669043',
'669044', '669045', '669046', '699001', '699003', '699004', '699005', '699006', '699009', '709004', '709013', '709016',
'709019', '709020', '709022', '709023', '709024', '709025', '709027', '709030', '709031', '709043', '709044', '709048',
'729028', '739005', '759001', '769008', '769019', '809004', '809005', '809006', '809007', '809008', '809009', '809010',
'809011', '809012', '809013', '809014', '809015', '809016', '809017', '809018', '809019', '809020', '809022', '809023',
'809026', '809027', '809028', '809029', '809030', '809031', '809032', '809033', '809034', '809035', '809036', '809037',
'809038', '809039', '809040', '809041', '809042', '809043', '809044', '809045', '809046', '809047', '809048', '809049',
'809050', '809051', '809053', '809054', '809055', '809056', '809057', '809058', '809059', '809060', '809061', '819006',
'819008', '819009', '819010', '819011', '819012', '819013', '819014', '819015', '819016', '819017', '819019', '819020',
'819021', '819022', '819023', '819024', '819025', '819026', '819027', '819028', '819029', '819030', '819031', '839018',
'899021', '899022', '909001', '979010', '979024', '979025', '979026', '979027', '979029', '979091', '979092', '979093',
'979094', '979095', '979096', '989001', '989002', '989003', '989043', '989065', 'A49018', 'C19103', 'C39002', 'D29008',
'D29009', 'D29010', 'D29011', 'G49050', 'I19027', 'I69003', 'I69004', 'I69005', 'J29018', 'K29002', 'L19008', 'P19033',
'P79002', 'Q99001', 'Q99002', 'T99001', 'T99002', 'U99009', 'X19001', 'X19002', 'X19003', 'X19011', 'Y79001']
"""
|
[
"wangleixin@yahoo.com"
] |
wangleixin@yahoo.com
|
0575d7d9a3ca2056933ecd9b1d9c8799f3572e87
|
a80e9eb7ade3d43ce042071d796c00dd10b93225
|
/ch_7/Quadratic.py
|
e77eacca7d2fc9c2eae058ebf9bb5816848fc724
|
[] |
no_license
|
ksjpswaroop/python_primer
|
69addfdb07471eea13dccfad1f16c212626dee0a
|
99c21d80953be3c9dc95f3a316c04b0c5613e830
|
refs/heads/master
| 2020-07-14T17:37:45.923796
| 2014-06-06T22:30:48
| 2014-06-06T22:30:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
# Exercise 7.6
from numpy import linspace
from cmath import sqrt


class Quadratic:
    """Quadratic polynomial y = a*x**2 + b*x + c."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def value(self, x):
        """Return the polynomial evaluated at x."""
        a, b, c = self.a, self.b, self.c
        return a * x ** 2 + b * x + c

    def table(self, L, R, n=10):
        """Print a table of n evenly spaced values of y on [L, R]."""
        print('y = ax^2 + bx + c')
        print('a = %g, b = %g, c = %g' % (self.a, self.b, self.c))
        print('-' * 30)
        print('%12s %12s' % ('x', 'y'))
        print('-' * 30)
        for x in linspace(L, R, n):
            print('%12g %12g' % (x, self.value(x)))

    def roots(self):
        """Return the two (possibly complex) roots via the quadratic formula.

        cmath.sqrt handles a negative discriminant, so complex roots come
        back as complex numbers rather than raising.
        """
        a, b, c = self.a, self.b, self.c
        d = sqrt(b * b - 4 * a * c)
        r1 = (-b + d) / (2 * a)
        r2 = (-b - d) / (2 * a)
        return r1, r2


# Demo guarded so importing the module has no side effects
# (originally ran unconditionally, with Python 2 print statements).
if __name__ == '__main__':
    quad = Quadratic(2, -6, 12)
    print(quad.value(0))
    print(quad.value(5))
    print(quad.roots())
    quad.table(-5, 5, 11)

"""
Sample run:
python Quadratic.py
12
32
((1.5+1.9364916731037085j), (1.5-1.9364916731037085j))
y = ax^2 + bx + c
a = 2, b = -6, c = 12
"""
|
[
"noahwaterfieldprice@gmail.com"
] |
noahwaterfieldprice@gmail.com
|
c1aa83d130ce68b30efd26154ab61c0fdc349fe8
|
7b739d1ea5f53d8cfffc18f23044aec11238b2bb
|
/단계별/15. 백트래킹/01. N과 M (1).py
|
2db337c80c24122ac1e1c87d4200e5600f0c3b60
|
[] |
no_license
|
Choojj/acmicpc
|
2281f299386174388c08a31ac6ad76c4b1a2ce6c
|
0fa3fc2c575cb1de51be2dc41408b50d7fd7a00f
|
refs/heads/master
| 2022-12-10T09:11:27.310472
| 2020-09-04T08:46:06
| 2020-09-04T08:46:06
| 270,581,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
import sys


def backtraking(N, M, depth, answer=None, used=None):
    """Print every length-M sequence of distinct numbers from 1..N in
    lexicographic order (BOJ 15649, "N and M (1)").

    `answer` (current partial sequence) and `used` (taken-flags) are created
    on the first call and threaded through the recursion — the original kept
    them as module globals, which broke importing this module.
    """
    if answer is None:
        answer = [0] * M
    if used is None:
        used = [False] * (N + 1)
    if depth == M:
        # A complete sequence: print it space-separated
        # (trailing space kept, matching the original output format).
        for i in range(len(answer)):
            print(answer[i], end=" ")
        print()
        return
    for i in range(1, N + 1):
        if not used[i]:
            answer[depth] = i
            used[i] = True
            backtraking(N, M, depth + 1, answer, used)
            used[i] = False  # release i for the sibling branches


if __name__ == '__main__':
    # Originally this stdin read ran at import time, crashing any import.
    N, M = map(int, sys.stdin.readline().split())
    backtraking(N, M, 0)
|
[
"qweasd9526@naver.com"
] |
qweasd9526@naver.com
|
b11ad1600711c8c9f71b5ec5982dc342a418b536
|
b93c188b16cbb9917e5a3af1239aa2c1f02c5600
|
/tests/requests/valid/012.py
|
10a54334d22c8888a323961ca77790be5e4e97a2
|
[
"MIT",
"BSD-3-Clause",
"FSFAP"
] |
permissive
|
tilgovi/restkit
|
1e2fc0131afc721804054396e0ad79e70e9317ee
|
12d97ea867eb0516bf2ed885b9d6007b84c79bb2
|
refs/heads/master
| 2021-01-18T08:52:27.148178
| 2010-11-26T07:31:46
| 2010-11-26T07:31:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
# Expected parse result for this test fixture: a chunked HTTP/1.1 POST with
# trailing headers.  `uri` is supplied by the harness that loads this file —
# presumably it expands the path into a full URI; not defined here (verify
# against the test loader).
request = {
    "method": "POST",
    "uri": uri("/chunked_w_trailing_headers"),
    "version": (1, 1),  # HTTP/1.1
    "headers": [
        ("Transfer-Encoding", "chunked")
    ],
    "body": "hello world",  # the de-chunked body the parser should produce
    "trailers": [
        ("Vary", "*"),
        ("Content-Type", "text/plain")
    ]
}
|
[
"bchesneau@gmail.com"
] |
bchesneau@gmail.com
|
4911b631beb8973c72847d4fe73f50f5ea01f7fd
|
ac7435b0b3faa6b6cf51d0d6b43984b77b70a37c
|
/nova/pci/whitelist.py
|
603b29400fbc330bc792a44f5e1b39aef77762ca
|
[
"Apache-2.0"
] |
permissive
|
gokrokvertskhov/nova-mesos-driver
|
04688cd51cad9790cf5460b44ba527b51080760d
|
fdb9c8468f6a8680c19095a81bf77884ae61e170
|
refs/heads/master
| 2021-01-10T10:51:07.096729
| 2016-03-25T01:45:10
| 2016-03-25T01:45:10
| 54,685,199
| 0
| 1
|
Apache-2.0
| 2020-07-24T01:00:58
| 2016-03-25T01:22:06
|
Python
|
UTF-8
|
Python
| false
| false
| 4,014
|
py
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _
from nova.pci import devspec
# Config option: operators whitelist assignable PCI devices in nova.conf as
# JSON specs (matching on vendor_id / product_id etc.).
pci_opts = [cfg.MultiStrOpt('pci_passthrough_whitelist',
                            default=[],
                            help='White list of PCI devices available to VMs. '
                                 'For example: pci_passthrough_whitelist = '
                                 '[{"vendor_id": "8086", "product_id": "0443"}]'
                            )
            ]
CONF = cfg.CONF
CONF.register_opts(pci_opts)  # expose the option on the global config object
LOG = logging.getLogger(__name__)  # module logger (unused in this chunk)
class Whitelist(object):
    """Decides which PCI devices on a compute node may be assigned to guests.

    The cloud administrator whitelists devices by vendor_id, product_id and
    similar attributes; with no whitelist configured, nothing is assignable.
    """

    def _parse_white_list_from_config(self, whitelists):
        """Parse and validate the pci whitelist from the nova config."""
        parsed = []
        for jsonspec in whitelists:
            try:
                entry = jsonutils.loads(jsonspec)
            except ValueError:
                raise exception.PciConfigInvalidWhitelist(
                    reason=_("Invalid entry: '%s'") % jsonspec)
            # A bare dict is accepted as shorthand for a one-element list.
            if isinstance(entry, dict):
                entry = [entry]
            elif not isinstance(entry, list):
                raise exception.PciConfigInvalidWhitelist(
                    reason=_("Invalid entry: '%s'; "
                             "Expecting list or dict") % jsonspec)
            for item in entry:
                if not isinstance(item, dict):
                    raise exception.PciConfigInvalidWhitelist(
                        reason=_("Invalid entry: '%s'; "
                                 "Expecting dict") % item)
                parsed.append(devspec.PciDeviceSpec(item))
        return parsed

    def __init__(self, whitelist_spec=None):
        """Build the whitelist from `whitelist_spec`, a list of JSON strings
        (each a dict or list of dicts of device properties), e.g.
        '[{"product_id":"1520", "vendor_id":"8086"}]'.  None/empty means
        nothing is assignable.
        """
        super(Whitelist, self).__init__()
        if not whitelist_spec:
            self.specs = []
        else:
            self.specs = self._parse_white_list_from_config(whitelist_spec)

    def device_assignable(self, dev):
        """Return whether device-properties dict `dev` matches any entry."""
        return any(spec.match(dev) for spec in self.specs)

    def get_devspec(self, pci_dev):
        """Return the first spec matching PCI device object `pci_dev`,
        or None if nothing matches."""
        for spec in self.specs:
            if spec.match_pci_obj(pci_dev):
                return spec
def get_pci_device_devspec(pci_dev):
    # Convenience lookup: build a Whitelist from the configured
    # pci_passthrough_whitelist and return the spec matching pci_dev
    # (None when nothing matches).
    dev_filter = Whitelist(CONF.pci_passthrough_whitelist)
    return dev_filter.get_devspec(pci_dev)
|
[
"gokrokvertskhov@mirantis.com"
] |
gokrokvertskhov@mirantis.com
|
d0e92bdb9509ec14e81a1049597521136059a5c9
|
9d61f7ef9eee4187111aa3337af3f3326d56b67e
|
/thruster_driver/scripts/thruster_driver
|
1d948a7c27858dbb4ed1666dec1e8fcd29e73a49
|
[] |
no_license
|
uf-mil-archive/SubjuGator
|
a6e99a06e5a7daaad1303cc69a45a17ac4e6ee00
|
c0f4d5296a33f939ede2784cd9297daea929513e
|
refs/heads/master
| 2021-05-29T20:03:26.424539
| 2015-07-25T19:17:28
| 2015-07-25T19:19:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,511
|
#!/usr/bin/env python
from __future__ import division
import math
import struct
import numpy
import roslib
roslib.load_manifest('thruster_driver')
import rospy
from std_msgs.msg import Header
from geometry_msgs.msg import Point, Vector3
from sensor_msgs.msg import MagneticField
from thruster_handling.broadcaster import ThrusterBroadcaster
from magnetic_dynamic_compensation.msg import FieldInfo
from kill_handling.listener import KillListener
from embedded_protocol import embedded
from thruster_driver.srv import DoMagneticCalibration, DoMagneticCalibrationResponse
rospy.init_node('thruster_driver', anonymous=True)

# Connection / identity parameters for this thruster, from the param server.
address = rospy.get_param('~address')
port = rospy.get_param('~port')
local_address = rospy.get_param('~local_address')
remote_address = rospy.get_param('~remote_address')
thruster_id = rospy.get_param('~thruster_id')
frame_id = rospy.get_param('~frame_id')
position = rospy.get_param('~position')
direction = rospy.get_param('~direction')
rev_force = rospy.get_param('~rev_force')  # max reverse force magnitude
fwd_force = rospy.get_param('~fwd_force')  # max forward force
# Magnetic-compensation calibration; absent (None) until calibrated.
mag_frame_id = rospy.get_param('~mag_frame_id', None)
mag_coeffs = rospy.get_param('~mag_coeffs', None)

# When the sub is killed, immediately command zero thrust.
kill_listener = KillListener(killed_callback=lambda: thrustercommand_callback(0))

# forward commands to thruster
conn = embedded.Embedded(address, port, local_address, remote_address)
def thrustercommand_callback(force):
    # Convert a force command into the thruster's wire format and send it.
    # While killed, only the zero command is allowed through.
    if kill_listener.get_killed() and force != 0:
        return
    # Normalize to [-1, 1] using the (possibly asymmetric) fwd/rev limits.
    scaled = force / fwd_force if force >= 0 else force / rev_force
    clamped = -1 if scaled < -1 else 1 if scaled > 1 else scaled
    # Fixed-point 8.8 percentage, rounded to nearest.
    x = int(math.floor(clamped * 100 * 2**8 + .5))
    # Sign-magnitude encoding: the 0x8000 bit is set for non-negative values.
    conn.send(struct.pack('<BBH', 0, 3, 0x8000|x if x >= 0 else -x))
info_period = rospy.Duration(1)
# Advertise this thruster's geometry and force limits once per second.
thruster_broadcaster = ThrusterBroadcaster(
    frame_id=frame_id,
    id=thruster_id,
    lifetime=info_period * 2,  # entry expires if two broadcasts are missed
    position=position,
    direction=direction,
    min_force=-rev_force,
    max_force=+fwd_force,
    torque_per_force=[0, 0, 0],
    command_callback=thrustercommand_callback,
)
rospy.Timer(info_period, lambda timerevent: thruster_broadcaster.send())
def heartbeat():
    # Keep the link alive and (re)request telemetry publishing.
    conn.send('')
    conn.send(struct.pack('<BBB', 0, 1, 50)) # StartPublishing(50hz)
rospy.Timer(rospy.Duration(.1), lambda _: heartbeat())
def do_magnetic_calibration(req):
    # Service handler: sweep the thruster through its force range while
    # sampling the magnetometer, then fit per-direction polynomials mapping
    # signed motor current -> magnetic field disturbance.
    _mag_holder = [None]
    def _got_mag(msg):
        _mag_holder[:] = [numpy.array([msg.magnetic_field.x, msg.magnetic_field.y, msg.magnetic_field.z]), msg.header.frame_id]
    mag_sub = rospy.Subscriber('/imu/mag_raw', MagneticField, _got_mag)
    def wait_for_mag():
        # Poll until a fresh magnetometer sample arrives.
        _mag_holder[0] = None
        while _mag_holder[0] is None:
            rospy.sleep(.01)
        return _mag_holder[0], _mag_holder[1]
    N = 21
    currents = []
    mags = []
    # Sweep up through N force setpoints, then back down, letting the
    # thruster settle 0.5 s at each point before sampling.
    for dir, i in [('fwd', _) for _ in range(N)] + [('rev', _) for _ in reversed(range(N))]:
        force = -rev_force + (fwd_force - -rev_force)*i/(N-1)
        thrustercommand_callback(force)
        rospy.sleep(.5)
        currents.append(wait_for_current())
        mags.append(wait_for_mag()[0])
        print dir, i, force, currents[-1], mags[-1]
    thrustercommand_callback(0)  # stop the thruster after the sweep
    currents = numpy.array(currents)
    mags = numpy.array(mags)
    #from matplotlib import pyplot
    #pyplot.plot(currents, mags[:,0])
    #pyplot.show()
    global mag_frame_id
    mag_frame_id = wait_for_mag()[1]
    rospy.set_param('~mag_frame_id', mag_frame_id)
    ORDER = 3
    # Separate cubic fits for positive and negative current; the constant
    # terms are cleared below so zero current maps to zero field.
    posfit = numpy.polyfit(*zip(*[pair for pair in zip(currents, mags) if pair[0] > 0]) + [ORDER])
    negfit = numpy.polyfit(*zip(*[pair for pair in zip(currents, mags) if pair[0] < 0]) + [ORDER])
    posfit[-1] = 0
    negfit[-1] = 0
    global mag_coeffs
    mag_coeffs = [[map(float, row) for row in negfit], [map(float, row) for row in posfit]]
    rospy.set_param('~mag_coeffs', mag_coeffs)
    mag_sub.unregister()
    return DoMagneticCalibrationResponse()
rospy.Service('~do_magnetic_calibration', DoMagneticCalibration, do_magnetic_calibration)
# Latest signed-current sample, filled in by the telemetry loop below.
_current_holder = [None]
def wait_for_current():
    # Poll until the telemetry loop stores a fresh sample.
    _current_holder[0] = None
    while _current_holder[0] is None:
        rospy.sleep(.01)
    return _current_holder[0]
mag_pub = rospy.Publisher('/imu/mag_generated_info', FieldInfo)
# Telemetry loop: decode 13-byte status packets from the thruster and, once
# calibrated, publish the predicted magnetic disturbance.
while not rospy.is_shutdown():
    data = conn.recv()
    now = rospy.Time.now()
    if len(data) != 13:
        print 'wrong length', len(data)
        continue
    typecode, tickcount, flags, refinput_, presentoutput_, railvoltage_, current_ = struct.unpack('<BHHHHHH', data)
    if typecode != 0:
        print 'wrong typecode', typecode
        continue
    # Fixed-point decoding: refinput is sign-magnitude 8.8; the others are
    # unsigned with 10- or 12-bit fractional parts.
    # NOTE(review): the command encoder above sets 0x8000 for NON-negative
    # values, while this decode treats 0x8000 as negative — the device echo
    # may define its own convention; confirm against the firmware protocol.
    refinput = (refinput_ & ~0x8000) / 2**8 * (-1 if refinput_ & 0x8000 else 1)
    presentoutput, railvoltage, current = presentoutput_ / 2**10, railvoltage_ / 2**10, current_ / 2**12
    signed_current = current * (1 if refinput >= 0 else -1)
    _current_holder[0] = signed_current
    if mag_frame_id is not None and mag_coeffs is not None:
        # Evaluate the calibrated polynomial for this current's direction.
        magnetic_field = numpy.zeros(3) if signed_current == 0 else \
            numpy.polyval(mag_coeffs[0], signed_current) if signed_current < 0 else \
            numpy.polyval(mag_coeffs[1], signed_current)
        mag_pub.publish(FieldInfo(
            header=Header(
                stamp=now,
                frame_id=mag_frame_id,
            ),
            id=rospy.get_name(),
            lifetime=rospy.Duration(1), # XXX
            magnetic_field=Vector3(*magnetic_field),
        ))
|
[
"forrest@forre.st"
] |
forrest@forre.st
|
|
584855499f37277bd64c47b5e6c8cc26c6157388
|
2443f23d928a6b3516f810e3dfdf6f4b72aa0325
|
/st01.Python기초/py04연산자/py04_02_몫과머지.py
|
126d71891077d16c18cc6ab311b0782a4002050c
|
[] |
no_license
|
syuri7/Python20200209
|
48653898f0ce94b8852a6a43e4e806adcf8cd233
|
5f0184d9b235ce366e228b84c663a376a9957962
|
refs/heads/master
| 2021-01-01T10:37:59.077170
| 2020-03-15T09:15:50
| 2020-03-15T09:15:50
| 239,241,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
import math  # for math.floor below

# Quotient and remainder of 5 / 2, computed several ways.
num=5/2
print(num) # 2.5

# Quotient via floor division.
quotient = 5//2
print("몫은: ", quotient) # 2

# Quotient via math.floor() of the true quotient.
quotient = math.floor(num)
print("몫은: ", quotient) # 2

# Remainder via the modulo operator.
remainder = 5%2
print("나머지는: ", remainder) # 1
|
[
"d@d"
] |
d@d
|
7591cd1eb094e2dd03e1ba8e953311c8092df947
|
59f64b5cf799e31c97b11828dba4787afb8f3f17
|
/hail/python/test/hail/utils/test_struct_repr_pprint.py
|
338fa33f57bfc4dae35e4ce817280c5513926ae3
|
[
"MIT"
] |
permissive
|
hail-is/hail
|
2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1
|
07a483ae0f46c66f3ed6fd265b48f48c06298f98
|
refs/heads/main
| 2023-09-01T15:03:01.450365
| 2023-09-01T02:46:35
| 2023-09-01T02:46:35
| 45,069,467
| 913
| 262
|
MIT
| 2023-09-14T21:53:32
| 2015-10-27T20:55:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,823
|
py
|
import hail as hl
from pprint import pformat
# repr()/pformat() of hl.Struct: keys that are valid identifiers render as
# keyword arguments; any non-identifier key ('y ') forces the **{...} form.
def test_repr_empty_struct():
    assert repr(hl.Struct()) == 'Struct()'
def test_repr_identifier_key_struct():
    assert repr(hl.Struct(x=3)) == 'Struct(x=3)'
def test_repr_two_identifier_keys_struct():
    assert repr(hl.Struct(x=3, y=3)) == 'Struct(x=3, y=3)'
def test_repr_non_identifier_key_struct():
    assert repr(hl.Struct(**{'x': 3, 'y ': 3})) == "Struct(**{'x': 3, 'y ': 3})"
def test_repr_struct_in_struct_all_identifiers():
    assert repr(hl.Struct(x=3, y=3, z=hl.Struct(a=5))) == 'Struct(x=3, y=3, z=Struct(a=5))'
def test_repr_struct_in_struct_some_non_identifiers1():
    # A non-identifier key in the inner struct only affects the inner repr.
    assert repr(hl.Struct(x=3, y=3, z=hl.Struct(**{'a ': 5}))) == "Struct(x=3, y=3, z=Struct(**{'a ': 5}))"
def test_repr_struct_in_struct_some_non_identifiers2():
    # A non-identifier key in the outer struct puts the whole outer level
    # into **{...} form, while the nested struct keeps its own format.
    assert repr(hl.Struct(**{'x': 3, 'y ': 3, 'z': hl.Struct(a=5)})) == "Struct(**{'x': 3, 'y ': 3, 'z': Struct(a=5)})"
def test_pformat_empty_struct():
    assert pformat(hl.Struct()) == 'Struct()'
def test_pformat_identifier_key_struct():
    assert pformat(hl.Struct(x=3)) == 'Struct(x=3)'
def test_pformat_two_identifier_keys_struct():
    assert pformat(hl.Struct(x=3, y=3)) == 'Struct(x=3, y=3)'
def test_pformat_non_identifier_key_struct():
    assert pformat(hl.Struct(**{'x': 3, 'y ': 3})) == "Struct(**{'x': 3, 'y ': 3})"
def test_pformat_struct_in_struct_all_identifiers():
    assert pformat(hl.Struct(x=3, y=3, z=hl.Struct(a=5))) == 'Struct(x=3, y=3, z=Struct(a=5))'
def test_pformat_struct_in_struct_some_non_identifiers1():
    assert pformat(hl.Struct(x=3, y=3, z=hl.Struct(**{'a ': 5}))) == "Struct(x=3, y=3, z=Struct(**{'a ': 5}))"
def test_pformat_struct_in_struct_some_non_identifiers2():
    assert pformat(hl.Struct(**{'x': 3, 'y ': 3, 'z': hl.Struct(a=5)})) == "Struct(**{'x': 3, 'y ': 3, 'z': Struct(a=5)})"
def test_pformat_small_struct_in_big_struct():
    x = hl.Struct(a0=0, a1=1, a2=2, a3=3, a4=4, a5=hl.Struct(b0='', b1='na', b2='nana', b3='nanana'))
    # The wide outer struct wraps to one field per line, aligned under the
    # opening paren; the short inner struct still fits on one line.
    # (Alignment inside the expected string reconstructed from pprint
    # semantics — confirm against a live pformat run.)
    expected = """
Struct(a0=0,
       a1=1,
       a2=2,
       a3=3,
       a4=4,
       a5=Struct(b0='', b1='na', b2='nana', b3='nanana'))
""".strip()
    assert pformat(x) == expected
def test_pformat_big_struct_in_small_struct():
    # NOTE: this test was defined twice verbatim; the duplicate definition
    # (which silently shadowed this one at import time, flake8 F811) has
    # been removed.
    x = hl.Struct(a5=hl.Struct(b0='', b1='na', b2='nana', b3='nanana', b5='ndasdfhjwafdhjskfdshjkfhdjksfhdsjk'))
    # The long inner struct forces one field per line, aligned under the
    # inner struct's opening paren.
    expected = """
Struct(a5=Struct(b0='',
                 b1='na',
                 b2='nana',
                 b3='nanana',
                 b5='ndasdfhjwafdhjskfdshjkfhdjksfhdsjk'))
""".strip()
    assert pformat(x) == expected
def test_array_of_struct_all_identifier():
    # A list of structs wraps one element per line, aligned inside the '['.
    # (Alignment reconstructed from pprint semantics — confirm live.)
    expected = """
[Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243),
 Struct(x=3243)]
""".strip()
    assert pformat([hl.Struct(**{'x': 3243}) for _ in range(10)]) == expected
def test_array_of_struct_non_identifier():
    # Same layout when the structs need the **{...} spelling.
    expected = """
[Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123}),
 Struct(**{'x': 3243, 'y ': 123})]
""".strip()
    assert pformat([hl.Struct(**{'x': 3243, 'y ': 123}) for _ in range(10)]) == expected
|
[
"noreply@github.com"
] |
hail-is.noreply@github.com
|
b8e2268768855166460543c4a549ace91b2acd01
|
aa76391d5789b5082702d3f76d2b6e13488d30be
|
/Data Structure/collections/collections03.py
|
f37a8db3304c4250ee197efc507c772589a18b0b
|
[] |
no_license
|
B2SIC/python_playground
|
118957fe4ca3dc9395bc78b56825b9a014ef95cb
|
14cbc32affbeec57abbd8e8c4ff510aaa986874e
|
refs/heads/master
| 2023-02-28T21:27:34.148351
| 2021-02-12T10:20:49
| 2021-02-12T10:20:49
| 104,154,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,382
|
py
|
# namedtuple()
# With a plain tuple, many positional fields make it easy to lose track of
# what each position means; naming the fields avoids that.
# A dict could be used instead, but it costs more memory —
# namedtuple keeps a tuple's low overhead while adding named access.
import collections
aa = ("홍길동", 24, "남")
print(aa)
bb = ("강복녀", 21, "여")
print(bb[0])
for n in [aa, bb]:
    print("%s 은(는) %d세의 %s성 입니다." % n)
# Using namedtuple: same data, but fields are accessed by name.
Person = collections.namedtuple("Person", "name age gender")
aa = Person(name = "강길동", age = 25, gender = "남")
bb = Person(name = "강복녀", age = 21, gender = "여")
for i in [aa, bb]:
    print("%s 은(는) %d세의 %s성 입니다." % i)
print()
# OrderedDict: a dict subclass that remembers insertion order.
# How does it differ from the standard dict?
# => Unlike a plain dict, OrderedDict treats ordering as significant
#    (notably in equality comparisons, demonstrated below).
dic = {}
dic["서울"] = "LG 트윈스"
dic["대구"] = "삼성 라이온즈"
dic["광주"] = "기아 타이거즈"
for i, j in dic.items():
    print(i, j)
print("==================")
dic1 = collections.OrderedDict()
dic1["서울"] = "LG 트윈스"
dic1["대구"] = "삼성 라이온즈"
dic1["광주"] = "기아 타이거즈"
for i, j in dic1.items():
    print(i, j)
print("< 비교를 이용한 표준 사전과 OrderedDict 의 차이점 >")
dic3 = {}
dic3["서울"] = "LG 트윈스"
dic3["대구"] = "삼성 라이온즈"
dic3["광주"] = "기아 타이거즈"
dic4 = {}
dic4["서울"] = "LG 트윈스"
dic4["광주"] = "기아 타이거즈"
dic4["대구"] = "삼성 라이온즈"
# Plain dicts compare equal even though insertion order differs -> True.
print(dic3 == dic4)
dic5 = collections.OrderedDict()
dic5["서울"] = "LG 트윈스"
dic5["대구"] = "삼성 라이온즈"
dic5["광주"] = "기아 타이거즈"
dic6 = collections.OrderedDict()
dic6["서울"] = "LG 트윈스"
dic6["광주"] = "기아 타이거즈"
dic6["대구"] = "삼성 라이온즈"
# OrderedDict equality is order-sensitive, so this prints False.
print(dic5 == dic6)
|
[
"the_basic_@kookmin.ac.kr"
] |
the_basic_@kookmin.ac.kr
|
014eebc0b47437ce3b6ed32e4e7644145307ee0e
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/sorting_20200614221804.py
|
aab218c7b33236836a8e08302cc90f7c2f007196
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
def wavesort(arr):
    """Return `arr` rearranged into "wave" order: a[0] >= a[1] <= a[2] >= ...

    Sorts ascending, then swaps each adjacent pair so every odd index holds a
    local minimum.  Prints the sorted and final arrays plus the adjacency
    checks, matching the original script's trace style.  Returns the wave
    array (the original returned None).
    """
    numbers = sorted(arr)
    n = len(numbers)
    print(numbers)
    # Swap elements pairwise: (0,1), (2,3), ... producing the wave shape.
    for i in range(0, n - 1, 2):
        numbers[i], numbers[i + 1] = numbers[i + 1], numbers[i]
    # Check each odd position is <= both of its neighbours.
    # (The original compared against numbers[i+1] with `i` left over from the
    # swap loop, which read past the end of the list and raised IndexError.)
    for j in range(1, n - 1, 2):
        print(numbers[j], '<', numbers[j - 1], " ", numbers[j], '<', numbers[j + 1])
    print(numbers)
    return numbers


if __name__ == '__main__':
    wavesort([3, 6, 5, 10, 7, 20])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
9f02551bf775ed8ec2d6884a17d21cc6099d7879
|
b3217e2bb6e72fbcb15df99b5c6c10ea4731a5b7
|
/sarctf/ppc/2.py
|
99590c8528f3d8868b0b0480a5fd96ab12992ed4
|
[] |
no_license
|
CrackerCat/ctf-6
|
5704de09eda187e111c7719c71e0a81c5d5c39e3
|
aa7846548451572fe54a380dc8d367a0132ad2ec
|
refs/heads/master
| 2023-01-28T06:18:01.764650
| 2020-12-07T12:05:20
| 2020-12-07T12:05:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
#!/usr/bin/python
#coding=utf-8
#__author__:TaQini
from pwn import *
p = remote('212.47.229.1', 33002)
context.log_level = 'debug'
# Short-hand wrappers around the pwntools tube API.
se = lambda data :p.send(data)
sa = lambda delim,data :p.sendafter(delim, data)
sl = lambda data :p.sendline(data)
sla = lambda delim,data :p.sendlineafter(delim, data)
sea = lambda delim,data :p.sendafter(delim, data)
rc = lambda numb=4096 :p.recv(numb)
ru = lambda delims, drop=True :p.recvuntil(delims, drop)
uu32 = lambda data :u32(data.ljust(4, '\0'))
uu64 = lambda data :u64(data.ljust(8, '\0'))
info_addr = lambda tag, addr :p.info(tag + ': {:#x}'.format(addr))
def solve(ans='-1'):
    # Read one challenge message, ROT13-decode it and send the decoded text
    # back (or send the explicit `ans` override when given).
    ru('Message: ')
    data = ru('\n')
    ru('Answer: ')
    print data
    data = data.decode('rot13')  # Python 2 str codec; the service speaks ROT13
    print data
    if(ans!='-1'):
        sl(ans)
    else:
        sl(str(data))
i = 0
# Keep answering rounds; the flag appears after enough correct answers.
while True:
    solve()
    print i
    i+=1
# find flag after 100 times
# FLAG{Y0U_V3RY_F45T3R_CRYPT0GR4PH}
p.interactive()
|
[
"742954809@qq.com"
] |
742954809@qq.com
|
ffde862214cda7a2147f1869d2129ac1db5da852
|
6219e6536774e8eeb4cadc4a84f6f2bea376c1b0
|
/scraper/storage_spiders/babyclickvn.py
|
8b4bfb4c58c50080570a3fe83c35000202c62e4f
|
[
"MIT"
] |
permissive
|
nguyenminhthai/choinho
|
109d354b410b92784a9737f020894d073bea1534
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
refs/heads/master
| 2023-05-07T16:51:46.667755
| 2019-10-22T07:53:41
| 2019-10-22T07:53:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors the shared parse_item uses to extract product fields;
# empty strings mean the field is not scraped for this site.
XPATH = {
    'name' : "//div[@id='content_detail']/div[@class='extra_title width880']",
    'price' : "//div[@id='product_price']/div[@id='ctl00_ContentPlaceHolder1_dtpro_ctl00_product_price_right2']",
    'category' : "",
    'description' : "//div[@id='info_right']/div[@id='product_detail']",
    'images' : "//div[@id='product_image']/div[@id='product_pic']/div[@id='wrap']/a/img/@src | //a[@class='cloud-zoom-gallery']/img/@src",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
name = 'babyclick.vn'
allowed_domains = ['babyclick.vn']
start_urls = ['http://babyclick.vn/Trang-chu/Cho-be.aspx']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Crawl rules: *.aspx product pages go to parse_item; all other links except
# the listed info/cart pages are followed for more links.
# NOTE(review): the allow pattern '\*.aspx' matches a literal '*' character,
# so it likely never matches a real URL — probably meant '.*\.aspx'; confirm
# against the generator.
rules = [
    Rule(LinkExtractor(allow=['\*.aspx']), 'parse_item'),
    Rule(LinkExtractor(deny=['/Khuyen-mai.aspx', '/Tin-tuc.aspx', '/Lien-he.aspx','/Gio-hang.aspx','/Gioi-thieu.aspx','/Cach-mua-hang.aspx','/Cach-thanh-toan.aspx']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
[
"nguyenchungthuy.hust@gmail.com"
] |
nguyenchungthuy.hust@gmail.com
|
09ede8ef76b80288854cd7a698b21384d4287691
|
411eff94020c192d5e5f657fa6012232ab1d051c
|
/extras/unused/goals/Goals.py
|
9edb3faad6ded548a68976b4d7a7778136b57789
|
[] |
no_license
|
xMakerx/cio-src
|
48c9efe7f9a1bbf619a4c95a4198aaace78b8491
|
60b2bdf2c4a24d506101fdab1f51752d0d1861f8
|
refs/heads/master
| 2023-02-14T03:12:51.042106
| 2021-01-15T14:02:10
| 2021-01-15T14:02:10
| 328,268,776
| 1
| 0
| null | 2021-01-15T15:15:35
| 2021-01-09T23:51:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
"""
Filename: Goals.py
Created by: blach (31Jan15)
"""
class Goal:
    """Base class for quest goals: tracks completion state, the reward
    granted on completion, and a goal-type tag set by subclasses."""
    def __init__(self, data):
        # `data` is a dict-like goal description; only "reward" is read here.
        self.completed = False
        self.reward = data.get("reward")
        self.goalType = None  # subclasses overwrite (e.g. CIGlobals.Suit)
    def getGoalType(self):
        return self.goalType
    def isCompleted(self):
        return self.completed
    def getReward(self):
        return self.reward
    def setReward(self, reward):
        self.reward = reward
class CogGoal(Goal):
    """Goal that requires defeating a number of a particular cog (enemy)."""
    def __init__(self, data):
        self.goalNum = data.get("goalNum")    # number of cogs required
        self.goalCog = data.get("cog")        # which cog counts toward the goal
        self.progress = data.get("progress")  # cogs defeated so far
        Goal.__init__(self, data)
        # CIGlobals is expected to be imported by the full file (not visible
        # in this chunk).
        self.goalType = CIGlobals.Suit
    def getCog(self):
        return self.goalCog
    def setCogProgress(self, numCogs):
        self.progress = numCogs
    def isCompleted(self):
        # Bug fix: the original read self.cogProgress / self.cogGoal, which
        # are never assigned anywhere (AttributeError at runtime); the real
        # attributes are `progress` and `goalNum`.
        return self.progress >= self.goalNum
    def getCogProgress(self):
        return self.progress
    def getCogGoal(self):
        return self.goalNum
class MinigameGoal(Goal):
    """Goal tied to a minigame event reaching a target value."""
    def __init__(self, data):
        self.event = data.get("event")  # event name to listen for
        self.game = data.get("game")    # which minigame the event belongs to
        self.value = data.get("value")  # target value for the event
        Goal.__init__(self, data)
        # CIGlobals is expected to be imported by the full file (not visible
        # in this chunk).
        self.goalType = CIGlobals.Minigame
    def getEvent(self):
        return self.event
    def getGame(self):
        return self.game
    def getValue(self):
        return self.value
|
[
"brianlach72@gmail.com"
] |
brianlach72@gmail.com
|
1edcdf5662fb9929ad20c46750e1aacfce2ec20c
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part009876.py
|
57a965a16500bcdc8e2f8d774f7276a11554e190
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher57870(CommutativeMatcher):
    # Auto-generated many-to-one matcher for commutative Mul patterns
    # (sympy Rubi integration rules).  Used as a singleton via .get().
    _instance = None
    # Pattern 0: two wildcard factors under Mul, the second defaulting to 1.
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i3.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily create and cache the singleton instance.
        if CommutativeMatcher57870._instance is None:
            CommutativeMatcher57870._instance = CommutativeMatcher57870()
        return CommutativeMatcher57870._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 57869
        # The bare `return` before `yield` makes this an (empty) generator:
        # iterating it yields nothing for this generated state machine.
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
3a1dde9c5c50408db3863a86c8e88385a9feaa1b
|
23ec357d5df7addf06cb70c10ba9173521c70a9b
|
/core/migrations/0009_auto_20210617_1030.py
|
35a247c12f40532b5e495d2fc609f64c811b74cf
|
[] |
no_license
|
blimp666/d_job
|
b8e8b93ef6b94e24a38bd94195a779bfff7f3c30
|
18904ac12af6593bf59b1ba379f722bd69d00863
|
refs/heads/main
| 2023-06-07T21:50:34.596128
| 2021-06-22T11:15:20
| 2021-06-23T19:36:48
| 376,893,878
| 0
| 0
| null | 2021-06-15T19:30:46
| 2021-06-14T16:48:17
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
# Generated by Django 3.2.4 on 2021-06-17 10:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Application.application to `conference` and re-declare the
    default on Conference.date_start."""
    dependencies = [
        ('core', '0008_auto_20210617_1019'),
    ]
    operations = [
        migrations.RenameField(
            model_name='application',
            old_name='application',
            new_name='conference',
        ),
        migrations.AlterField(
            model_name='conference',
            name='date_start',
            # NOTE(review): the default is a fixed timestamp captured when the
            # migration was auto-generated, not "now" at migration time.
            field=models.DateField(default=datetime.datetime(2021, 6, 17, 10, 30, 0, 482699), verbose_name='Дата проведения'),
        ),
    ]
|
[
"email@example.com"
] |
email@example.com
|
189fc2f6b216183bc3e8e1bb343a24c9669444f0
|
af84faa0eedb07725755b89e1ce254ed2317286c
|
/Basics/04_Elements/Distance2Grid.py
|
cda319b3d75b274e8d209901c3c8ba90b3d62d0c
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
ambaamba/Examples
|
cdc59a0db40097e9401941facb88e09f41f96abf
|
88cfc58ca0ce81e70bb3fde964b11d8a094bddf8
|
refs/heads/master
| 2020-12-23T10:39:33.696844
| 2019-12-17T11:23:21
| 2019-12-17T11:23:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,078
|
py
|
#!/usr/bin/env python3
# -----------------------------------------------------------------------------
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
#
# P A G E B O T
#
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# TextBoxSideWHConditions.py
#
# Position fixed size textbox elements by their page side with conditions
#
# Document is the main instance holding all information about
# the document togethers (pages, styles, etc.)
from pagebot.document import Document
from pagebot.elements import newTextBox
from pagebot.toolbox.units import p, pt
from pagebot.toolbox.color import color, whiteColor, blackColor
from pagebot.conditions import *
from pagebot.constants import *
from pagebot.fonttoolbox.objects.font import findFont
W = H = pt(500)
PADDING = pt(4*12)
w = pt(8*12)
doc = Document(w=W, h=H, originTop=False)
page = doc[1] # Get the single page from te document.
page.padding = PADDING
page.baselineGrid = pt(24)
page.baselineGridStart = PADDING * 1.5
page.showBaselineGrid = True
page.showPadding = True
page.showOrigin = True
font = findFont('PageBot Regular')
def getText(s):
    """Build a two-part styled string: a large white 'TEXT' headline
    followed on the next line by the small black caption *s*, both
    centered, using the module-level `font` and document context."""
    styled = doc.context.newString('TEXT', style=dict(
        font=font, fontSize=36, leading=pt(40),
        textFill=whiteColor, xTextAlign=CENTER))
    styled += doc.context.newString('\n' + s, style=dict(
        font=font, fontSize=10, leading=pt(12),
        textFill=blackColor, xTextAlign=CENTER))
    return styled
# Page 1: a 3x3 arrangement of fixed-size text boxes. Each box is placed by
# a pair of layout conditions (horizontal x vertical); the left column keeps
# its solved position, while the center and right columns are snapped onto
# the baseline grid manually after solving (see below).
e1 = newTextBox(getText('e1 Bottom2Bottom'),
    parent=page, fill=color('red'),
    showOrigin=True, conditions=[Left2Left(), Bottom2Bottom()])
e2 = newTextBox(getText('e2 Middle2Middle'),
    parent=page, fill=color('orange'),
    showOrigin=True, conditions=[Left2Left(), Middle2Middle()])
e3 = newTextBox(getText('e3 Top2Top'), parent=page,
    fill=color('yellow').darker(0.8),
    showOrigin=True, conditions=[Left2Left(), Top2Top()])
e4 = newTextBox(getText('e4 Bottom y on grid'),
    parent=page, fill=color('red'),
    showOrigin=True, conditions=[Center2Center(), Bottom2Bottom()])
e5 = newTextBox(getText('e5 Bottom y on grid'),
    parent=page, fill=color('orange'),
    showOrigin=True, conditions=[Center2Center(), Middle2Middle()])
e6 = newTextBox(getText('e6 Bottom y on grid'),
    parent=page, fill=color('yellow').darker(0.8),
    showOrigin=True, conditions=[Center2Center(), Top2Top()])
e7 = newTextBox(getText('e7 Top y on grid'),
    parent=page, fill=color('red'), yAlign=TOP,
    showOrigin=True, conditions=[Right2Right(), Bottom2Bottom()])
e8 = newTextBox(getText('e8 Top y on grid'),
    parent=page, fill=color('orange'), yAlign=TOP,
    showOrigin=True, conditions=[Right2Right(), Middle2Middle()])
e9 = newTextBox(getText('e9 Top y on grid'),
    parent=page, fill=color('yellow').darker(0.8), yAlign=TOP,
    showOrigin=True, conditions=[Right2Right(), Top2Top()])
# Run the condition solver to position all elements on this page.
page.solve()
# Snap the center and right columns onto the baseline grid by adding each
# element's remaining distance to the nearest grid line (the demo's subject).
e4.y += e4.distance2Grid
e5.y += e5.distance2Grid
e6.y += e6.distance2Grid
e7.y += e7.distance2Grid
e8.y += e8.distance2Grid
e9.y += e9.distance2Grid
# Page 2: same grid/padding setup, but the boxes carry a Baseline2Grid
# condition so the solver itself aligns baselines; distance2Grid is then
# added once more to demonstrate the residual snap.
# NOTE(review): `page.next` appears to create/fetch the following page —
# confirm against the pagebot Document/Page API.
page = page.next
page.padding = PADDING
page.baselineGrid = pt(24)
page.baselineGridStart = PADDING * 1.5
page.showBaselineGrid = True
page.showPadding = True
page.showOrigin = True
e1 = newTextBox(getText('Middle y on grid'), parent=page,
    fill=color('red'), yAlign=MIDDLE, showOrigin=True,
    conditions=[Left2Left(), Bottom2Bottom(), Baseline2Grid()])
e2 = newTextBox(getText('Middle y on grid'), parent=page,
    fill=color('orange'), yAlign=MIDDLE, showOrigin=True,
    conditions=[Left2Left(), Middle2Middle(), Baseline2Grid()])
e3 = newTextBox(getText('Middle y on grid'), parent=page,
    fill=color('yellow').darker(0.8), yAlign=MIDDLE, showOrigin=True,
    conditions=[Left2Left(), Top2Top(), Baseline2Grid()])
# Solve conditions, then snap each element the rest of the way to the grid.
page.solve()
e1.y += e1.distance2Grid
e2.y += e2.distance2Grid
e3.y += e3.distance2Grid
# Page 3: compare the three baseline conditions — Baseline2Grid (nearest),
# BaselineUp2Grid and BaselineDown2Grid (directional) — one per column.
# No manual distance2Grid adjustment here; the solver does all alignment.
page = page.next
page.padding = PADDING
page.baselineGrid = pt(24)
page.baselineGridStart = PADDING * 1.5
page.showBaselineGrid = True
page.showPadding = True
page.showOrigin = True
# Left column: Baseline2Grid, with the element's own baseline grid shown.
e1 = newTextBox(getText('Baseline2Grid'), parent=page, showBaselineGrid=True,
    fill=color('red'), yAlign=MIDDLE, showOrigin=True,
    conditions=[Left2Left(), Bottom2Bottom(), Baseline2Grid()])
e2 = newTextBox(getText('Baseline2Grid'), parent=page, showBaselineGrid=True,
    fill=color('orange'), yAlign=MIDDLE, showOrigin=True,
    conditions=[Left2Left(), Middle2Middle(), Baseline2Grid()])
e3 = newTextBox(getText('Baseline2Grid'), parent=page, showBaselineGrid=True,
    fill=color('yellow').darker(0.8), yAlign=MIDDLE, showOrigin=True,
    conditions=[Left2Left(), Top2Top(), Baseline2Grid()])
# Center column: snap baselines upward to the grid.
e4 = newTextBox(getText('BaselineUp2Grid'), parent=page,
    fill=color('red'), showOrigin=True,
    conditions=[Center2Center(), Bottom2Bottom(), BaselineUp2Grid()])
e5 = newTextBox(getText('BaselineUp2Grid'), parent=page,
    fill=color('orange'), showOrigin=True,
    conditions=[Center2Center(), Middle2Middle(), BaselineUp2Grid()])
e6 = newTextBox(getText('BaselineUp2Grid'), parent=page,
    fill=color('yellow').darker(0.8), showOrigin=True,
    conditions=[Center2Center(), Top2Top(), BaselineUp2Grid()])
# Right column: snap baselines downward to the grid.
e7 = newTextBox(getText('BaselineDown2Grid'), parent=page,
    fill=color('red'), yAlign=TOP, showOrigin=True,
    conditions=[Right2Right(), Bottom2Bottom(), BaselineDown2Grid()])
e8 = newTextBox(getText('BaselineDown2Grid'), parent=page,
    fill=color('orange'), yAlign=TOP, showOrigin=True,
    conditions=[Right2Right(), Middle2Middle(), BaselineDown2Grid()])
e9 = newTextBox(getText('BaselineDown2Grid'), parent=page,
    fill=color('yellow').darker(0.8), yAlign=TOP, showOrigin=True,
    conditions=[Right2Right(), Top2Top(), BaselineDown2Grid()])
page.solve()
# Export in _export folder that does not commit in Git. Force to export PDF.
EXPORT_PATH = '_export/Distance2Grid.pdf'
doc.export(EXPORT_PATH)
|
[
"buro@petr.com"
] |
buro@petr.com
|
a3f2d11981c17034f2fa94d45194dd6fc7f631bd
|
4e2117a4381f65e7f2bb2b06da800f40dc98fa12
|
/026_mobile-deeplabv3-plus/01_float32/01_image_to_npy.py
|
9c53b65419559184884b93a4f706df629e353ed8
|
[
"AGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
PINTO0309/PINTO_model_zoo
|
84f995247afbeda2543b5424d5e0a14a70b8d1f1
|
ff08e6e8ab095d98e96fc4a136ad5cbccc75fcf9
|
refs/heads/main
| 2023-09-04T05:27:31.040946
| 2023-08-31T23:24:30
| 2023-08-31T23:24:30
| 227,367,327
| 2,849
| 520
|
MIT
| 2023-08-31T23:24:31
| 2019-12-11T13:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
# Collect every JPEG under JPEGImages/ as an RGB pixel array and save the
# stacked result to person_dataset.npy.
from PIL import Image
import os, glob
import numpy as np

# One RGB array per image, in glob order (same order the loader will see).
images = [np.asarray(Image.open(path).convert("RGB"))
          for path in glob.glob("JPEGImages/*.jpg")]
np.save("person_dataset", np.array(images))
|
[
"rmsdh122@yahoo.co.jp"
] |
rmsdh122@yahoo.co.jp
|
c7cbe123ca4dc453acc9c9f66bc343aadfbcb3c6
|
f0755c0ca52a0a278d75b76ee5d9b547d9668c0e
|
/atcoder.jp/sumitrust2019/sumitb2019_c/Main.py
|
c9aee86e614b37fe0a6f1b4a03b4dc524557edd8
|
[] |
no_license
|
nasama/procon
|
7b70c9a67732d7d92775c40535fd54c0a5e91e25
|
cd012065162650b8a5250a30a7acb1c853955b90
|
refs/heads/master
| 2022-07-28T12:37:21.113636
| 2020-05-19T14:11:30
| 2020-05-19T14:11:30
| 263,695,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
X = int(input())
a = [100, 101, 102, 103, 104, 105]
dp = [[False]*100010 for _ in range(10)]
dp[0][0] = True
for i in range(6):
for j in range(X+1):
dp[i+1][j] |= dp[i][j]
if j >= a[i]:
dp[i+1][j] |= dp[i][j-a[i]]
dp[i+1][j] |= dp[i+1][j-a[i]]
if dp[6][X]:
print(1)
else:
print(0)
|
[
"g1620535@is.ocha.ac.jp"
] |
g1620535@is.ocha.ac.jp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.