blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68f625480165088ba501a0f79424f9723973a8bb | c7821b133e31e97af6e1aec75c1fd3039b56356f | /Class1/Ex1-3.py | 17c29ca8f15d17385c5f0d3b8449980afa92128f | [] | no_license | ksannedhi/kbyers-network-automation | b9089c28104f4c590ca33690e548c4d44331cdab | a4afa54e7b323c5529033c842db2b603f4dabf34 | refs/heads/master | 2022-11-22T23:41:32.245143 | 2020-07-19T14:44:43 | 2020-07-19T14:44:43 | 275,795,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | '''For one of the Cisco IOS devices, use Netmiko and the send_command() method to retrieve 'show version'.
Save this output to a file in the current working directory.'''
from netmiko import ConnectHandler
nxos1_dict = {"host": "nxos1.lasthop.io", "username": "pyclass", "password": "88newclass", "device_type": "cisco_nxos"}
nxos1_connect = ConnectHandler(**nxos1_dict)
sh_ver_op = nxos1_connect.send_command("sh ver")
nxos1_connect.disconnect()
with open("show_ver-1.txt", "w") as f:
f.write(sh_ver_op)
| [
"noreply@github.com"
] | ksannedhi.noreply@github.com |
414116c661e7742e368acf7fbaf2b0329a80b438 | f87ab2e8406e31a27ae0795ea9822f87e8e96181 | /cloudy/sys/firewall.py | ba527f4a09575823bd7e085667a75708a357c3a0 | [
"BSD-2-Clause"
] | permissive | hungld/python-cloudy | 87e67cce0d25b9e91f6599992b64b5c134a8daa1 | 3d5d7b890b65d1951ee6162bf71ef1355d98df21 | refs/heads/master | 2021-01-18T11:10:57.100134 | 2012-11-16T16:49:03 | 2012-11-16T16:49:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,743 | py | import os
import re
import sys
from fabric.api import run
from fabric.api import task
from fabric.api import sudo
from fabric.api import put
from fabric.api import env
from fabric.api import settings
from fabric.api import hide
from fabric.contrib import files
from fabric.utils import abort
from cloudy.sys.etc import sys_etc_git_commit
def sys_firewall_install():
""" Install filrewall application - Ex: (cmd)"""
sudo('apt-get -y install ufw')
sys_etc_git_commit('Installed firewall (ufw)')
def sys_firewall_secure_server():
""" Secure the server right away - Ex: (cmd)"""
sudo('ufw logging on')
sudo('ufw default deny incoming; ufw default allow outgoing')
sudo('ufw allow ssh')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
sys_etc_git_commit('Server is secured down')
def sys_firewall_wideopen():
""" Open up firewall, the server will be wide open - Ex: (cmd)"""
sudo('ufw default allow incoming; ufw default allow outgoing')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_disable():
""" Disable firewall, the server will be wide open - Ex: (cmd)"""
sudo('ufw disable; sudo ufw status verbose')
def sys_firewall_allow_incoming_http():
""" Allow http (port 80) requests to this server - Ex: (cmd)"""
sudo('ufw allow http')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_disallow_incoming_http():
""" Disallow http (port 80) requests to this server - Ex: (cmd)"""
sudo('ufw delete allow http')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_allow_incoming_https():
""" Allow http (port 443) requests to this server - Ex: (cmd)"""
sudo('ufw allow https')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_disallow_incoming_https():
""" Disallow http (port 443) requests to this server - Ex: (cmd)"""
sudo('ufw delete allow https')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_allow_incoming_postgresql():
""" Allow postgresql (port 5432) requests to this server - Ex: (cmd)"""
sudo('ufw allow postgresql')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_disallow_incoming_postgresql():
""" Disallow postgresql (port 5432) requests to this server - Ex: (cmd)"""
sudo('ufw delete allow postgresql')
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_allow_incoming_port(port):
""" Allow requests on specific port to this server - Ex: (cmd:<port>)"""
sudo('ufw allow {0}}'.format(port))
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_disallow_incoming_port(port):
""" Disallow requests to this server on specific port - Ex: (cmd:<port>)"""
sudo('ufw delete allow {0}'.format(port))
with settings(warn_only=False):
sudo('ufw delete allow {0}/tcp'.format(port))
sudo('ufw delete allow {0}/udp'.format(port))
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_allow_incoming_port_proto(port, proto):
""" Allow requests on specific port to this server - Ex: (cmd:<port>,<proto>)"""
sudo('ufw allow {0}/{1}'.format(port, proto))
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
def sys_firewall_disallow_incoming_port_proto(port, proto):
""" Disallow requests to this server on specific port - Ex: (cmd:<port>,<proto>)"""
sudo('ufw delete allow {0}/{1}'.format(port, proto))
sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose')
| [
"val@neekware.com"
] | val@neekware.com |
8a18808e80ccfc2d11a0dacbbdbec9e698cd0994 | d93159d0784fc489a5066d3ee592e6c9563b228b | /DQM/L1TMonitorClient/python/L1TDTTPGClient_cfi.py | 6f9bbabe85a1656fb33ac3c167cf3f3122dc4c5a | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 296 | py | import FWCore.ParameterSet.Config as cms
l1tDttpgClient = cms.EDAnalyzer("L1TDTTPGClient",
input_dir = cms.untracked.string('L1T/L1TDTTPG'),
prescaleLS = cms.untracked.int32(-1),
output_dir = cms.untracked.string('L1T/L1TDTTPG/Tests'),
prescaleEvt = cms.untracked.int32(500)
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
4fc91c8159b26f0bad70d6b396491ec89a0bc835 | ed1e81a2325d310de7961274a06bfe6cdb7993d0 | /tkinter_gui/chapter04/pmw_scrolledtext.py | 3a00239bf8917c7f58afc7b27322d8488fce0f72 | [] | no_license | fahimkhan/python | ce573298adf30ca8426b74f3ab275ab7f8047a91 | 1733ad39cf214362c8a76f8996740715888d2101 | refs/heads/master | 2021-01-15T15:50:27.323739 | 2016-08-24T11:02:56 | 2016-08-24T11:02:56 | 20,254,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from Tkinter import *
import Pmw
root = Tk()
root.option_readfile('optionDB')
root.title('ScrolledText')
Pmw.initialise()
st = Pmw.ScrolledText(root, borderframe=1, labelpos=N,
label_text='Blackmail', usehullsize=1,
hull_width=400, hull_height=300,
text_padx=10, text_pady=10,
text_wrap='none')
st.importfile('blackmail.txt')
st.pack(fill=BOTH, expand=1, padx=5, pady=5)
root.mainloop()
| [
"fahim.elex@gmail.com"
] | fahim.elex@gmail.com |
9ef585a165fb22a7fcdf893785c856234edb2aed | c3483984a4782be6097e4753de3cb545ae00039b | /geneticTSP/Population.py | 9d515032d5456dcdd5c41cef98ee75a4773b00fd | [] | no_license | nghiemphan93/machineLearning | 67c3f60f317a0c753b465751113511baaefd1184 | 36d214b27c68d399f5494b5ec9b28fee74d57f7f | refs/heads/master | 2020-03-28T02:20:11.843154 | 2020-02-03T14:18:39 | 2020-02-03T14:18:39 | 147,563,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | from geneticTSP.DNA import DNA
from geneticTSP.City import City
from typing import List
import random
class Population:
"""
Representation of each generation holding n individual DNAs
"""
def __init__(self, numbCities: int, mutationRate: float, populationSize: int, eliteRate: float):
self.eliteRate = eliteRate
self.rouletteWheel: List[DNA] = []
self.generations: int = 0
self.finished: bool = False
self.numbCities = numbCities
self.mutationRate = mutationRate
self.perfectScore = 1.0
self.bestDNA: DNA = None
self.cityList: List[City] = self.createRoute(self.numbCities)
self.population: List[DNA] = self.createPopulation(populationSize)
self.calcFitness()
def createRoute(self, numbCities: int) -> List[City]:
"""
Randomly create a demo list of cities with x, y coordinates range from 50 to 700
:param numbCities:
:return:
"""
cityRoute = []
for i in range(numbCities):
cityRoute.append(City(x=random.randint(50, 700),
y=random.randint(50, 700)))
return cityRoute
def createPopulation(self, populationSize: int) -> List[DNA]:
"""
Create all individual DNAs for a generation
:param populationSize: int
:return: population: List[DNA]
"""
population = []
for i in range(populationSize):
population.append(DNA(route=self.cityList))
return population
def calcFitness(self) -> None:
"""
Calculate fitness of each DNA inversely proportional to total distances
:return: None
"""
for i in range(len(self.population)):
self.population[i].calcFitness()
def calcDistances(self) -> None:
"""
Calculate distances of every DNA
:return: None
"""
for i in range(len(self.population)):
self.population[i].calcTotalDist()
def naturalSelection(self) -> None:
"""
Simulate a roulette wheel of all possible DNA candidates to do cross over for next generation
:return: None
"""
self.rouletteWheel = []
maxFitness = 0
minFitness = 1
for dna in self.population:
if dna.fitness > maxFitness:
maxFitness = dna.fitness
if dna.fitness < minFitness:
minFitness = dna.fitness
for dna in self.population:
# scale fitness down to 0-1
fitness = (dna.fitness - minFitness) / (maxFitness - minFitness)
n = int(fitness * 100)
for i in range(n):
self.rouletteWheel.append(dna)
def rankPopulation(self) -> None:
"""
Sort the population ascending by distance
:return: None
"""
self.calcDistances()
self.population.sort(key=lambda route: route.distances)
def generate(self) -> None:
"""
Pick parents from roulette wheel to create new generation
:return: None
"""
self.rankPopulation()
eliteSize = int(len(self.population) * self.eliteRate)
for i in range(eliteSize, len(self.population)):
indexA = int(random.random() * len(self.rouletteWheel))
indexB = int(random.random() * len(self.rouletteWheel))
partnerA = self.rouletteWheel[indexA]
partnerB = self.rouletteWheel[indexB]
child: DNA = partnerA.crossover(partnerB)
child.mutate(self.mutationRate)
self.population[i] = child
self.generations += 1
def getBestFitness(self) -> float:
"""
Get fitness of the best fitted DNA
:return: bestFitness: float
"""
bestFitness = 0.0
for i in range(len(self.population)):
if self.population[i].fitness > bestFitness:
bestFitness = self.population[i].fitness
return bestFitness
def getBestDistance(self) -> float:
"""
Get distance of the best fitted DNA
:return:bestDistance: float
"""
bestDistance = 999999
for i in range(len(self.population)):
totalDist = self.population[i].calcTotalDist()
if totalDist < bestDistance:
bestDistance = totalDist
return bestDistance
def evaluate(self) -> None:
"""
Evaluate the process if best solution found
:return: None
"""
bestFitness = 0.0
indexBestDNA = 0
for i in range(len(self.population)):
if self.population[i].fitness > bestFitness:
indexBestDNA = i
bestFitness = self.population[i].fitness
self.bestDNA = self.population[indexBestDNA]
if bestFitness == self.perfectScore:
self.finished = True
def isFinished(self):
return self.finished
| [
"nghiemphan93@gmail.com"
] | nghiemphan93@gmail.com |
15b961666c94cb06e2715260c0de947141988ca1 | e6611443e946d1129985a95bc2dd2afc610f8292 | /CMS/apps/core/management/commands/data_migrate.py | 6441399ad29970086e46beb79479401f3eeca210 | [] | no_license | Indus-Action/Campaign-Management-System | a761dd9bbc7967f8302bb3283230f87ccc2bd2a6 | 9c6f1193ff897b8cc53f2a1c3bca8d70a890e70f | refs/heads/master | 2020-03-12T19:49:19.329764 | 2018-05-15T06:37:41 | 2018-05-15T06:37:41 | 130,792,314 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,062 | py | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from tags.models import Tag
from stages.models import Stage
from user_profiles.models import UserProfile
from django.contrib.auth.models import User
from vms2.settings.base import BASE_DIR
from forms.models import PersistentForm, PersistentFormData
import json
import csv
import traceback
import pdb
from datetime import datetime
class Command(BaseCommand):
help = 'Does some magical work'
def handle(self, *args, **options):
with open('/home/ubuntu/migrate_6Dec.csv') as f:
rows = csv.reader(f, quotechar="'", delimiter='$')
count = 0
count_success = 0
pf = PersistentForm.objects.last()
count = 0
max_count = 0
for row in rows:
count += 1
print count
mobile = row[42]
user = None
user_profile = None
if len(mobile) == 11:
mobile = mobile[1:]
if len(mobile) != 10:
continue
try:
user = User.objects.get(username=mobile)
user_profile, profile_created = UserProfile.objects.get_or_create(mobile=mobile)
if profile_created:
user.profile = user_profile
user.save()
user_profile.user = user
user_profile.save()
except:
user_profile, profile_created = UserProfile.objects.get_or_create(mobile=mobile)
if user_profile.user:
user = user_profile.user
else:
user, user_created = User.objects.get_or_create(username=mobile)
user_profile.user = user
user_profile.save()
try:
if user and user_profile:
pfd, pfd_created = PersistentFormData.objects.get_or_create(form=pf, beneficiary=user)
data = pfd.data
father_details = {
"Name": row[12],
"Occupation": row[39],
"Mobile": row[42]
}
data['Father Details'] = father_details
mother_details = {
"Name": row[47],
"Occupation": row[27],
"Mobile": row[2]
}
data['Mother Details'] = mother_details
data['Category'] = row[37]
children = []
if 'Children' in data.keys():
children = data['Children']
else:
children = []
if row[28]:
child = {}
child['Name'] = row[28]
child['Gender'] = row[21]
if row[51]:
try:
child['Date of Birth'] = datetime.strptime(row[51], '%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat()
except:
pass
children.append(child)
data['Children'] = children
documentation = {}
birth = {}
if row[0]:
birth = json.loads(row[0])
address = {}
if row[40]:
print row
address = json.loads(row[40])
category_dict = {}
if row[15]:
category_dict = json.loads(row[15])
income = {}
if row[48]:
print row
income = json.loads(row[48])
orphan = {}
if row[4]:
orphan = json.loads(row[4])
disabled = {}
if row[7]:
disabled = json.loads(row[7])
if address:
address_dict = {}
for key in address.keys():
if key == 'aadhar' and address[key]:
address_dict['Aadhar Card'] = 1
if key == 'ration' and address[key]:
address_dict['Ration Card'] = 1
if key == 'voter' and address[key]:
address_dict['Voter Card'] = 1
if key == 'license' and address[key]:
address_dict['License'] = 1
if key == 'bill' and address[key]:
address_dict['Electricity/Water Bill'] = 1
documentation['Address Proof'] = address_dict
if birth:
birth_dict = {}
for key in birth.keys():
if key == 'hospital' and birth[key]:
birth_dict['Hospital Record'] = 1
if key == 'anganwadi' and birth[key]:
birth_dict['Anganwadi Record'] = 1
if key == 'birth' and birth[key]:
birth_dict['Birth Certificate'] = 1
if key == 'self' and birth[key]:
birth_dict['Self Affidavit'] = 1
documentation['Birth Certificate'] = birth_dict
if income:
income_dict = {}
for key in income.keys():
if key == 'food' and income[key]:
income_dict['Food Security Card (GREEN)'] = 1
if key == 'ration' and income[key]:
income_dict['AAY Ration Card (PINK)'] = 1
if key == 'income' and income[key]:
income_dict['Income Certificate'] = 1
if key == 'bpl' and income[key]:
income_dict['BPL Card (YELLOW)'] = 1
documentation['Income Certificate'] = income_dict
if orphan:
orphan_dict = {}
for key in orphan.keys():
if key == 'orphan' and orphan[key]:
orphan_dict['CWC Certificate'] = 1
documentation['Orphan Certificate'] = orphan_dict
if disabled:
disabled_dict = {}
for key in disabled.keys():
if key == 'medical' and disabled[key]:
orphan_dict['Govt. Medical Certificate'] = 1
documentation['Disability Certificate'] = orphan_dict
data['Documentation'] = documentation
pfd.data = data
print pfd.data
pfd.save()
print pfd.id
except Exception, e:
print count
print(traceback.format_exc())
break
| [
"madhav.malhotra3089@gmail.com"
] | madhav.malhotra3089@gmail.com |
f00f9437013d1dc49a3fe3ec5fb6d6b921468ed1 | c77c2a16fb7eb13cc0d3364117bd47f9d8c7a4c6 | /toga_cocoa/libs/foundation.py | 0868fa4eea9bf3862f985905d688b322a253e155 | [
"BSD-3-Clause"
] | permissive | AnthonyBriggs/toga-cocoa | ec643cc625fb1037ae08e5b74169c1db367f17bb | c0ae161acf4064ab5d4e3e8a7ea6f34ed85054c5 | refs/heads/master | 2021-01-12T20:39:14.142452 | 2016-08-18T02:05:40 | 2016-08-18T02:05:40 | 65,788,763 | 0 | 0 | null | 2016-08-16T04:56:31 | 2016-08-16T04:56:31 | null | UTF-8 | Python | false | false | 488 | py | from ctypes import *
from ctypes import util
from rubicon.objc import *
######################################################################
# FOUNDATION
foundation = cdll.LoadLibrary(util.find_library('Foundation'))
foundation.NSMouseInRect.restype = c_bool
foundation.NSMouseInRect.argtypes = [NSPoint, NSRect, c_bool]
# NSArray.h
NSMutableArray = ObjCClass('NSMutableArray')
# NSURL.h
NSURL = ObjCClass('NSURL')
# NSURLRequest.h
NSURLRequest = ObjCClass('NSURLRequest')
| [
"russell@keith-magee.com"
] | russell@keith-magee.com |
204350a3ebcab223f47e16a0b1d20007ea53b18f | 25b981906ed4de4020f0318bc27da67199e23e43 | /mirna/cox/GBM/patient_info.py | b10dc70e6030438b937a98fe35b7085368be2037 | [
"MIT"
] | permissive | anyone1985/onco_lnc | a0f44dd1100c07a5dc289f580a376fde9668a067 | d174d79ba4af1647cde4fea8f43a04d7afc756af | refs/heads/master | 2021-01-18T05:25:12.339778 | 2016-02-26T02:42:54 | 2016-02-26T02:42:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,874 | py | ## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','clinical','nationwidechildrens.org_clinical_patient_gbm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the miRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
##only microarray data was available for GBM
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','FILE_SAMPLE_MAP_mirna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mirna={}
for i in data:
## The normalized data files are used
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mirna[x]=TCGA_to_mirna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
if TCGA_to_mirna.has_key(i[0]):
## The miRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[miRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mirna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| [
"jordananaya@gmail.com"
] | jordananaya@gmail.com |
0458f4ee0efc1ec610b7c8aa894478dd403f5fff | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02383/s889673780.py | 24a1bcfb649681f699c00a423a5b13ef1e36ceb6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | dice = {'N':(2, 6, 3, 4, 1, 5),'E':(4, 2, 1, 6, 5, 3),'W':(3, 2, 6, 1, 5, 4),'S':(5, 1, 3, 4, 6, 2)}
num = input().split()
for alf in input():
num = [num[i-1]for i in dice[alf]]
print(num[0])
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a9e7be52d82f906175434de07df4c6a153d6269b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2138/60624/269204.py | 00c53ecb569b0dee748ed76515975bbf1c81cf41 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | def func25():
temp = input()
flag = False
if temp.__contains__("0,0"):
flag = True
if not flag:
nums = list(map(int, temp.split(",")))
k = int(input())
if k == 0:
flag = True
if not flag:
for i in range(0,len(nums)):
if flag:
break
Sum = nums[i]
for j in range(i+1,len(nums)):
Sum += nums[j]
if Sum%k == 0:
flag = True
break
print(flag)
return
func25() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
231cdda5cb118f53e77b328b5ee3b10fa7e01de7 | 3a6732512d675cc4939b5da4f61aed523b2064dd | /noxfile.py | bea019eed448935451ca244b1d6a77eb31598ebd | [
"Apache-2.0"
] | permissive | vam-google/python-recommender | b1bfc8a0aa4730c2c1181577960dfcd4eee350ea | 255fc62897890b6e0a73c0e147eb930eadbaaddd | refs/heads/master | 2022-05-24T11:21:23.278760 | 2020-04-28T21:43:19 | 2020-04-28T21:43:19 | 254,788,427 | 0 | 0 | Apache-2.0 | 2020-04-11T03:53:37 | 2020-04-11T03:53:37 | null | UTF-8 | Python | false | false | 4,838 | py | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import shutil
import nox
BLACK_VERSION = "black==19.3b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
if os.path.exists("samples"):
BLACK_PATHS.append("samples")
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run("black", "--check", *BLACK_PATHS)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install(BLACK_VERSION)
session.run("black", *BLACK_PATHS)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.recommender",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["3.5", "3.6", "3.7", "3.8"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=80")
session.run("coverage", "erase")
@nox.session(python="3.7")
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| [
"noreply@github.com"
] | vam-google.noreply@github.com |
a68b097dc909d88cd7b7acf31c13504aa8b4d37c | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /class/class메소드오버라이딩.py | b6ce0a16350a90ec65a746459f7ab4a867a3fdd0 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | # 일반 유닛
class Unit:
    """Base class for ground units: holds name, hit points and move speed."""

    def __init__(self, name, hp, speed):
        self.name = name
        self.hp = hp
        self.speed = speed

    def move(self, location):
        """Print a ground-movement report toward *location*."""
        print("[지상 유닛 이동]")
        report = "{0} : {1} 방향으로 이동합니다. [속도 {2}]"
        print(report.format(self.name, location, self.speed))
# Attack-capable ground unit: extends Unit with an attack damage value.
class AttackUnit(Unit):
    def __init__(self, name, hp, speed, damage):
        Unit.__init__(self, name, hp, speed)
        self.damage = damage

    def attack(self, location):  # methods defined inside a class always take self as the first parameter
        print("{0} : {1} 방향으로 적군을 공격 합니다. [공격력 {2}]"
              .format(self.name, location, self.damage))

    def damaged(self, damage):
        # Report the hit, subtract it from HP, and announce destruction at 0 HP.
        print("{0} : {1} 데미지를 입었습니다.".format(self.name, damage))
        self.hp -= damage
        print("{0} : 현재 체력은 {1}입니다.".format(self.name, self.hp))
        if self.hp <= 0:
            print("{0} : 파괴되었습니다.".format(self.name))
# Flight capability mixin. Example: a dropship is an air transport that
# carries marines / firebats / tanks and cannot attack.
class Flyable:
    """Mixin that adds a flying speed and an airborne movement report."""

    def __init__(self, flying_speed):
        self.flying_speed = flying_speed

    def fly(self, name, location):
        """Print a flight report for *name* heading toward *location*."""
        report = "{0} : {1} 방향으로 날아갑니다. [속도 {2}]"
        print(report.format(name, location, self.flying_speed))
# Airborne attack unit: combines AttackUnit with the Flyable mixin.
class FlyableAttackUnit(AttackUnit, Flyable):
    def __init__(self, name, hp, damage, flying_speed):
        AttackUnit.__init__(self, name, hp, 0, damage)  # ground speed is 0 for air units
        Flyable.__init__(self, flying_speed)

    # Override Unit.move: air units fly instead of walking.
    def move(self, location):
        print("[공중 유닛 이동]")
        self.fly(self.name, location)
# Vulture: ground unit with good mobility.
vulture = AttackUnit("벌쳐", 80, 10, 20)
# Battlecruiser: air unit with very high HP and strong attack.
battlecruiser = FlyableAttackUnit("배틀크루저", 500, 25, 3)
vulture.move("11시")
# battlecruiser.fly(battlecruiser.name, "9시")  # superseded by the move() override below
battlecruiser.move("9시")
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
f6c1e40a8c0f5a20f74b65a39b254bf1173513f1 | 0a73cdfd4c80128ba26aecf498042842273f914a | /system/database/BusinessModel.py | 079b70759eae067f22813177dcef6d4b8d33672d | [] | no_license | philgookang/patty | d654758b8b1b608f9bcfab2081c94ea67036e3bf | 8c976350037ed707ec2e340c25e821020f96fcd3 | refs/heads/master | 2020-03-08T04:19:50.811568 | 2018-08-26T09:14:02 | 2018-08-26T09:14:02 | 127,918,010 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 254 | py |
class BusinessModel:
    """Mixin that copies mapping entries onto the instance as attributes."""

    def extend(self, data):
        """Set one attribute per key/value pair in *data*.

        Keys are coerced to ``str`` so they are usable as attribute names.
        Falsy inputs (``None``, ``{}``, ``''``, ...) and truthy non-mapping
        objects are silently ignored, preserving the original best-effort
        contract.  The ``try`` is narrowed to the ``.items()`` call so a
        genuine failure inside ``setattr`` is no longer swallowed.
        """
        if not data:
            return
        try:
            items = data.items()  # only a non-mapping can raise here
        except AttributeError:
            return
        for key, val in items:
            setattr(self, str(key), val)
| [
"philgookang@gmail.com"
] | philgookang@gmail.com |
34c0c1c9a9531fcb85d060cd3d1cb72263c06f0a | 1144c9c264c24f0e18c5ebe0990b055a29cd975d | /test/dist_utils.py | 0654d0d253217d6efafe16e65273e627b1eb0258 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | juliakreutzer/pytorch | 476bfd0ddc6ad4d6bd7b14322d63b808d291b7da | 0341546bcad668075fc58224e444bc66a0a597b2 | refs/heads/master | 2020-09-14T22:11:50.464769 | 2019-11-21T21:59:35 | 2019-11-21T21:59:35 | 223,272,050 | 1 | 0 | NOASSERTION | 2019-11-21T21:59:36 | 2019-11-21T21:51:55 | null | UTF-8 | Python | false | false | 5,071 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import threading
from functools import partial, wraps
import torch.distributed as dist
import torch.distributed.rpc as rpc
if not dist.is_available():
print("c10d not available, skipping tests")
sys.exit(0)
class TestConfig:
    """Keyword-only configuration holder for the RPC test backend."""

    __slots__ = ["rpc_backend_name", "build_rpc_agent_options"]

    def __init__(self, *args, **kwargs):
        # Positional arguments are rejected outright; every setting must be
        # passed by name (and match one of the declared slots).
        assert len(args) == 0, "TestConfig only takes kwargs."
        for name, value in kwargs.items():
            setattr(self, name, value)
TEST_CONFIG = TestConfig()
INIT_METHOD_TEMPLATE = "file://{file_name}"
MASTER_RANK = 0
_ALL_NODE_NAMES = set()
_DONE_NODE_NAMES = set()
_TERMINATION_SIGNAL = threading.Event()
def on_master_follower_report_done(worker_name):
    """Record on the master that *worker_name* finished its test.

    Once every node in ``_ALL_NODE_NAMES`` has reported, set the
    termination signal so all waiters can shut down.
    """
    assert (
        worker_name in _ALL_NODE_NAMES
    ), "{worker_name} is not expected by master.".format(worker_name=worker_name)
    assert (
        worker_name not in _DONE_NODE_NAMES
    ), "{worker_name} report done twice.".format(worker_name=worker_name)
    _DONE_NODE_NAMES.add(worker_name)
    # Keep waiting until every expected node has reported done.
    if _ALL_NODE_NAMES != _DONE_NODE_NAMES:
        return
    set_termination_signal()
def set_termination_signal():
    """Fire the process-wide termination event (must happen exactly once)."""
    assert not _TERMINATION_SIGNAL.is_set(), "Termination signal got set twice."
    _TERMINATION_SIGNAL.set()
def dist_init(old_test_method=None, setup_rpc=True, clean_shutdown=True):
    """
    We use this decorator for setting up and tearing down state since
    MultiProcessTestCase runs each `test*` method in a separate process and
    each process just runs the `test*` method without actually calling
    'setUp' and 'tearDown' methods of unittest.

    :param old_test_method: the wrapped test method (None when the decorator
        is used with arguments, see below).
    :param setup_rpc: when True, initialize the RPC framework before the test
        and tear it down afterwards.
    :param clean_shutdown: when True, run the master/follower handshake so no
        worker exits before all peers are done.
    """
    # If we use dist_init without arguments (ex: @dist_init), old_test_method is
    # appropriately set and we return the wrapper appropriately. On the other
    # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
    # old_test_method is None and we return a functools.partial which is the real
    # decorator that is used and as a result we recursively call dist_init with
    # old_test_method and the rest of the arguments appropriately set.
    if old_test_method is None:
        return partial(
            dist_init,
            setup_rpc=setup_rpc,
            clean_shutdown=clean_shutdown,
        )

    @wraps(old_test_method)
    def new_test_method(self, *arg, **kwargs):
        self.worker_id = self.rank
        if setup_rpc:
            # Rebuild the module-level roster of expected workers for this test.
            global _ALL_NODE_NAMES
            _ALL_NODE_NAMES = {
                "worker{}".format(rank) for rank in range(self.world_size)
            }
            rpc.init_rpc(
                name="worker%d" % self.rank,
                backend=self.rpc_backend,
                init_method=self.init_method,
                rank=self.rank,
                world_size=self.world_size,
                rpc_agent_options=self.rpc_agent_options,
            )
        return_value = old_test_method(self, *arg, **kwargs)
        if setup_rpc:
            if clean_shutdown:
                # Follower reports done.
                if self.rank == MASTER_RANK:
                    on_master_follower_report_done("worker{}".format(MASTER_RANK))
                else:
                    rpc.rpc_async(
                        "worker{}".format(MASTER_RANK),
                        on_master_follower_report_done,
                        args=("worker{}".format(self.rank),),
                    )
                # Master waits for followers to report done.
                # Follower waits for master's termination command.
                _TERMINATION_SIGNAL.wait()
                if self.rank == MASTER_RANK:
                    # Master sends termination command.
                    futs = []
                    for dst_rank in range(self.world_size):
                        # torch.distributed.rpc module does not support sending to self.
                        if dst_rank == MASTER_RANK:
                            continue
                        dst_name = "worker{}".format(dst_rank)
                        fut = rpc.rpc_async(dst_name, set_termination_signal, args=())
                        futs.append(fut)
                    for fut in futs:
                        assert fut.wait() is None, "Sending termination signal failed."
            # Close RPC. Need to do this even if we don't have a clean shutdown
            # since we need to shutdown the RPC agent. If we don't shutdown the
            # RPC agent, tests would fail since RPC agent threads, locks and
            # condition variables are not properly terminated.
            rpc.wait_all_workers()
        return return_value
    return new_test_method
# Set PROCESS_GROUP as the default RPC backend.
TEST_CONFIG.rpc_backend_name = "PROCESS_GROUP"
TEST_CONFIG.build_rpc_agent_options = lambda test_object: rpc.backend_registry.construct_rpc_agent_options(
test_object.rpc_backend,
# Use enough 'num_send_recv_threads' until we fix https://github.com/pytorch/pytorch/issues/26359
num_send_recv_threads=16,
)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b5ba32a152d02ebf1d4a15d0ec3c490370f8e80d | 68aa9bf99d62a5b991dc5aaa3d794f4bcd6e355a | /link/QuadTree.py | cbb0b951ae791d0f2f976b26481bd63d909e2ee6 | [] | no_license | dujodujo/lemur | 82c9e695459597ab1b3430e566bc375af84d563c | 1e6350b33f86f89f89c5bddbd3924364f027160e | refs/heads/master | 2021-01-01T16:49:35.386172 | 2013-11-06T09:59:12 | 2013-11-06T09:59:12 | 14,150,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | import pygame
class QuadTree(object):
    """Recursive quadtree over entities that expose a pygame ``rect``.

    Items whose rect straddles a split line are stored on the interior node;
    all others are pushed down into one of the four child quadrants.
    """

    def __init__(self, items, depth=8, worldRect=None):
        self.topLeft = None
        self.topRight = None
        self.bottomLeft = None
        self.bottomRight = None
        depth -= 1
        if not depth or not items:
            # Leaf: maximum depth reached or nothing left to split.
            self.items = items
            return
        if worldRect:
            # Copy so we never mutate the caller's rect.
            rec = worldRect.copy()
        else:
            # Derive world bounds from the items; copy the first rect so the
            # in-place unions below do not corrupt items[0].rect.
            # BUG FIX: the original called rec.union_ip(item) with the entity
            # itself where a Rect is required.
            rec = items[0].rect.copy()
            for item in items[1:]:
                rec.union_ip(item.rect)
        self.midx = (rec.left + rec.right) * 0.5
        self.midy = (rec.top + rec.bottom) * 0.5
        self.items = []
        topLeftItems = []
        topRightItems = []
        bottomLeftItems = []
        bottomRightItems = []
        for item in items:
            topLeft = item.rect.left <= self.midx and item.rect.top <= self.midy
            topRight = item.rect.right >= self.midx and item.rect.top <= self.midy
            bottomLeft = item.rect.left <= self.midx and item.rect.bottom >= self.midy
            bottomRight = item.rect.right >= self.midx and item.rect.bottom >= self.midy
            if topLeft and topRight and bottomLeft and bottomRight:
                # Straddles both split lines: keep on this node.
                self.items.append(item)
            else:
                if topLeft:
                    topLeftItems.append(item)
                if topRight:
                    topRightItems.append(item)
                if bottomLeft:
                    bottomLeftItems.append(item)
                if bottomRight:
                    bottomRightItems.append(item)
        # Child bounds. pygame.Rect takes (left, top, width, height).
        # BUG FIX: the original passed absolute mid coordinates as the
        # width/height arguments and read bounds from a possibly-None
        # worldRect instead of the computed bounds ``rec``.
        left_w = self.midx - rec.left
        right_w = rec.right - self.midx
        top_h = self.midy - rec.top
        bottom_h = rec.bottom - self.midy
        if topLeftItems:
            self.topLeft = QuadTree(topLeftItems, depth,
                pygame.Rect(rec.left, rec.top, left_w, top_h))
        if topRightItems:
            self.topRight = QuadTree(topRightItems, depth,
                pygame.Rect(self.midx, rec.top, right_w, top_h))
        if bottomLeftItems:
            self.bottomLeft = QuadTree(bottomLeftItems, depth,
                pygame.Rect(rec.left, self.midy, left_w, bottom_h))
        if bottomRightItems:
            self.bottomRight = QuadTree(bottomRightItems, depth,
                pygame.Rect(self.midx, self.midy, right_w, bottom_h))

    def testCollision(self, entity):
        """Return the set of stored items whose rects collide with *entity*.

        BUG FIX: the recursive calls used to pass ``entity.rect`` (a Rect,
        which has no ``.rect`` attribute and would crash one level down);
        the bottom-left / bottom-right guards also tested the wrong edge.
        """
        hits = set([self.items[i] for i in entity.rect.collidelistall(self.items)])
        if self.topLeft and entity.rect.left <= self.midx and entity.rect.top <= self.midy:
            hits |= self.topLeft.testCollision(entity)
        if self.topRight and entity.rect.right >= self.midx and entity.rect.top <= self.midy:
            hits |= self.topRight.testCollision(entity)
        if self.bottomLeft and entity.rect.left <= self.midx and entity.rect.bottom >= self.midy:
            hits |= self.bottomLeft.testCollision(entity)
        if self.bottomRight and entity.rect.right >= self.midx and entity.rect.bottom >= self.midy:
            hits |= self.bottomRight.testCollision(entity)
        return hits
"avsic.ziga@gmail.com"
] | avsic.ziga@gmail.com |
7638f28ec0669017447edb6460d4cc787ad0e580 | 42a0befb594a6c081f3e788016c53889cfa2a9fb | /hackerearth/code_arena/Little Deepu and Little Kuldeep.py | 9a78f1b805e509bebef66a9b94776fafc868707b | [] | no_license | Laksh8/competitive-programming | f436e8c525220ad95ef1c7a9d3aa98b4689d4f92 | 14c20e5cc32263c89a73524ab596efbbba2cc85a | refs/heads/master | 2022-12-24T23:54:16.313515 | 2020-09-08T06:59:11 | 2020-09-08T06:59:11 | 293,727,288 | 2 | 1 | null | 2020-10-04T14:15:38 | 2020-09-08T06:57:35 | Python | UTF-8 | Python | false | false | 434 | py | # Little Deepu and Little Kuldeep :--
'''
for _ in range(int(input())):
n = int(input())
lst = []
for i in range(n):
lst.append(int(input()))
lst.sort()
count = 1
for i in range(1,n):
if lst[i-1] == lst[i]:
count +=1
print(count)
'''
n = int(input())
lst = list(map(int,input().split()))
count = 0
for i in range(n-1):
count += lst[i] * lst[i+1]
print(count%1000000007)
| [
"lakshitkhanna311@gmail.com"
] | lakshitkhanna311@gmail.com |
def solution(n, words):
    """English word-chain game with *n* players.

    Return ``[player, turn]`` (both 1-based) identifying who first breaks
    the chain (wrong starting letter or a repeated word), or ``[0, 0]``
    when everyone survives.
    """
    seen = {words[0]}
    for idx, word in enumerate(words[1:], start=1):
        chain_ok = words[idx - 1][-1] == word[0]
        if chain_ok and word not in seen:
            seen.add(word)
            continue
        # idx % n -> which player spoke; idx // n -> how many full rounds passed.
        return [idx % n + 1, idx // n + 1]
    return [0, 0]
"rlatmd0829@naver.com"
] | rlatmd0829@naver.com |
38d35e1bc6fefd1c71f9bc737d0dfeb9dfe98d29 | b28b8df0b3c834c674d69abbef1281dae1256a95 | /strawberry/contrib/starlette/app/utils.py | f19938bfde088e501e2d3e37036df01c441930ad | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ethe/strawberry | fa1f58b8907bcccf5bd7dba96a3a406f636059df | 9015895966bbe733af0e5789f425d0be6c9ecf79 | refs/heads/master | 2020-06-04T12:59:22.597999 | 2019-06-24T02:41:25 | 2019-06-24T02:41:25 | 192,031,976 | 0 | 0 | null | 2019-06-15T03:27:27 | 2019-06-15T03:27:27 | null | UTF-8 | Python | false | false | 305 | py | import pathlib
def get_playground_template(request_path: str) -> str:
    """Load the GraphQL playground HTML template and inject *request_path*."""
    templates_dir = pathlib.Path(__file__).parents[1] / "templates"
    template = (templates_dir / "playground.html").read_text()
    return template.replace("{{REQUEST_PATH}}", request_path)
| [
"patrick.arminio@gmail.com"
] | patrick.arminio@gmail.com |
cb9c1da78c87247d38c9385b7873df365cb7db48 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client_common/client_request_lib/__init__.py | 2e3e2e5d97f1f685dcd3f48c5441af2fbf04e318 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 395 | py | # 2017.02.03 21:54:38 Střední Evropa (běžný čas)
# Embedded file name: scripts/client_common/client_request_lib/__init__.py
__version__ = '0.1'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client_common\client_request_lib\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:54:38 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
d06dd75178c69ecb8d56ebd4435d410626c24b72 | f3428e98300adc919dbf99c8a57b4e9bcc8396b0 | /tut17.py | 48ce570c0c924fee4ffb993ab25ae02717f29cfe | [] | no_license | Pankaj-GoSu/Tkinter-Python | 08b979e7458f593c10aa68d53532e224d1bd4d7c | f7d96e5585e096175b78450087ca602cfd44d476 | refs/heads/master | 2023-06-26T11:38:47.361881 | 2021-07-24T16:07:05 | 2021-07-24T16:07:05 | 386,153,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | #=========== Creating RadioButtons In Tkinter ========>>>
from tkinter import *
import tkinter.messagebox as tmsg
# Pop a confirmation dialog showing the selected menu item (var is the
# module-level IntVar bound to the radio buttons below).
# NOTE(review): "Recieve"/"recieved" are misspelled in the user-facing strings.
def Order():
    tmsg.showinfo("Order Recieve",f"We have recieved your order for {var.get()}. Thanks for ordering")
root = Tk()
root.geometry("455x230")
root.title("Radiobutton Tutorial")
var = IntVar()  # shared selection variable for the radio group
# var.set(1)
Label(root,text = "What would you like to have sir?",justify=LEFT,padx=14,font="lucida 15 bold").pack(anchor="w")
# NOTE(review): .pack() returns None, so `radio` never holds a widget reference.
radio = Radiobutton(root,text="Dosa",padx=14,variable=var,value=1).pack(anchor="w")
radio = Radiobutton(root,text="Idly",padx=14,variable=var,value=2).pack(anchor="w")
radio = Radiobutton(root,text="Paratha",padx=14,variable=var,value=3).pack(anchor="w")
radio = Radiobutton(root,text="Samosa",padx=14,variable=var,value=4).pack(anchor="w")
Button(text="Order Now",command=Order).pack()
root.mainloop()
| [
"Pankajgoswami330@gmail.com"
] | Pankajgoswami330@gmail.com |
e99a6c93e037dcfbd3db5d683cc1dfdfcf2ec887 | 9b11e49cbb9120f3f7e69a8884c0cee42896566d | /cutouts_combined_tag.py | e1e8cbc45ce1255a298f103124fe8aec518ed210 | [] | no_license | albusdemens/Backscattering_3DND | e6966cd8c39342181183de20028b959227a5f570 | f911f1f9f9bf863daffe2bcc85bd7d7a94e5b3c7 | refs/heads/master | 2021-01-21T13:53:17.469879 | 2016-06-27T10:51:45 | 2016-06-27T10:51:45 | 44,485,363 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | # Alberto Cereser, 17 Mar 2014
# alcer@fysik.dtu.dk, Technical University of Denmark
# For each tag, this code puts together all the cutouts
# Name changed from cutouts_combined_simple to cutouts_combined after deleting cutouts_combined (old version)
import scipy
#from scipy import signal
import pandas as pd
import numpy as np
import math
import sys
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
from skimage import data
from skimage.feature import match_template
import cv2
import gc
if __name__ == '__main__':
    # Python 2 script (note the print statements below).
    # One row per cutout: position (X, Y), path to the cutout matrix, and tag.
    data = pd.read_csv('Fe_PSI_spt_tagged_points_grouped.txt', sep=" ", header = None)
    data.columns = ["Angle_number", "Omega", "Intensity", "X", "Y", "Address", "Correlation", "ID", "ID_next", "Tag"]
    # Here are the variables to change, depending on the number of projections considered
    # and on the lines of the txt file (Fe_PSI_spt_refined.txt)
    Number_of_lines_in_txt = 3462
    Number_of_tags = 544
    # The idea is, for each angle, to make a new image putting together the cutouts
    # Expressed in global coordinates
    for i in range(1, (Number_of_tags + 1)):
        new_image = np.zeros((2700, 2300)) # 2592 is the number of rows, and 2160 the number of columns (RC, as in Roman Catholic)
        filename_new_image = ("/Users/Alberto/Documents/Data_analysis/ICON_Aug2013/Data_analysis/track_tag/track_tag_%03i.txt" % (i))
        for j in range(0, Number_of_lines_in_txt - 1): # We read each line of the refined spt file
            # Remember that the number of the first line is 0
            if (data.Tag[j] == i):
                x_center_blob = data.X[j]
                y_center_blob = data.Y[j]
                # Load this blob's cutout matrix from disk.
                cutout = np.loadtxt(data.Address[j])
                array_cutout = np.array(cutout)
                ran = array_cutout.shape
                ran_x_before_connectivity_search = ran[0]
                ran_y_before_connectivity_search = ran[1]
                size_box = 150
                # Remember that, for the cutouts, the center of the matrix is different from its cm!!!
                # Bounds of the cutout itself, centered on the blob position.
                x_min_before_connectivity_search = int(x_center_blob - (0.5*(ran_x_before_connectivity_search)))
                x_max_before_connectivity_search = int(x_center_blob + (0.5*(ran_x_before_connectivity_search)))
                y_min_before_connectivity_search = int(y_center_blob - (0.5*(ran_y_before_connectivity_search)))
                y_max_before_connectivity_search = int(y_center_blob + (0.5*(ran_y_before_connectivity_search)))
                # Bounds of the fixed-size box around the blob, clamped to the detector.
                x_min = int(x_center_blob - (0.5*(size_box)))
                x_max = int(x_center_blob + (0.5*(size_box)))
                y_min = int(y_center_blob - (0.5*(size_box)))
                y_max = int(y_center_blob + (0.5*(size_box)))
                if int(x_min) < 0:
                    x_min = int(0)
                if int(y_min) < 0:
                    y_min = int(0)
                if int(x_max) > 2159: #The size is 2160x2592, but I consider 2159 and 2591 because after we deal with x_max+1 and y_max+1
                    x_max = int(2159)
                if int(y_max) > 2591:
                    y_max = int(2591)
                # Intersection of the fixed box with the actual cutout extent.
                x_MIN = max(x_min, x_min_before_connectivity_search)
                x_MAX = min(x_max, x_max_before_connectivity_search)
                y_MIN = max(y_min, y_min_before_connectivity_search)
                y_MAX = min(y_max, y_max_before_connectivity_search)
                size_box_x = x_MAX - x_MIN + 1
                size_box_y = y_MAX - y_MIN + 1
                # Copy the overlapping region into the composite image.
                for k in range(0, size_box_x - 1): # Roman Catholic!
                    for l in range(0, size_box_y - 1):
                        print x_MIN, x_MAX, y_MIN, y_MAX, (size_box_x - 1), (size_box_y - 1), j
                        # The formulas for k in l in global coordinates take into account that
                        # the dimension of the cutouts is not fixed
                        global_l = int(l + x_center_blob - (x_center_blob - x_MIN))
                        global_k = int(k + y_center_blob - (y_center_blob - y_MIN))
                        #print array_cutout[k,l]
                        new_image[global_k, global_l] = array_cutout[k, l]
        #plt.imshow(new_image)
        #plt.show()
        np.savetxt(filename_new_image, new_image, fmt='%i')
        del new_image
gc.collect() | [
"mannaro85@gmail.com"
] | mannaro85@gmail.com |
ef2fc7e547f30ab9c59923a7b8be3db7f3fd1801 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/building-complaint-disposition-codes/depositor.py | 0020b2bf4c2adff3687c38b0cce607ab7cc8fc14 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | import requests
# Download the NYC building-complaint disposition codes CSV into the catalog.
response = requests.get("https://data.cityofnewyork.us/api/views/6v9u-ndjg/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/building-complaint-disposition-codes/data.csv", "wb") as f:
    f.write(response.content)
# Paths consumed by the catalog pipeline.
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/building-complaint-disposition-codes/data.csv"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
aec4ce087a5b4811d8f4512875bbb843645ddf41 | 1509d32ef8854845428961c3ead89fff26c0dd9d | /triangle_mov.py | d295e5b286b13954bfbde51492cf002ba2e9cb34 | [] | no_license | antonioam82/ejercicios-python | d623324e2e59ecddcf03400064a8aa3591bfd7e5 | 2dfe23494b9b34a9a0abe9379dcb69af9b1e9d73 | refs/heads/master | 2023-09-04T02:16:55.584179 | 2023-09-03T19:50:05 | 2023-09-03T19:50:05 | 137,039,851 | 35 | 41 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import glfw
from OpenGL.GL import *
import numpy as np
from math import sin, cos, tan
# initializing glfw library
if not glfw.init():
raise Exception("glfw can not be initialized!")
# creating the window
window = glfw.create_window(1280, 720, "My OpenGL window", None, None)
# check if window was created
if not window:
glfw.terminate()
raise Exception("glfw window can not be created!")
# set window's position
glfw.set_window_pos(window, 400, 200)
# make the context current
glfw.make_context_current(window)
vertices = [-0.5, -0.5, 0.0,
0.5, -0.5, 0.0,
0.0, 0.5, 0.0]
colors = [1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0]
def window_resize(window,width,height):
glViewport(0, 0, width, height)
glfw.set_window_size_callback(window, window_resize)
vertices = np.array(vertices, dtype=np.float32)
colors = np.array(colors, dtype=np.float32)
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointer(3, GL_FLOAT, 0, vertices)
glEnableClientState(GL_COLOR_ARRAY)
glColorPointer(3, GL_FLOAT, 0, colors)
#glClearColor(0, 0.1, 0.1, 1)
#glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# the main application loop
while not glfw.window_should_close(window):
glfw.poll_events()
glClear(GL_COLOR_BUFFER_BIT)
glClearColor(0.0,0.0,0.0,0.0)#PANTALLA NEGRA
ct = glfw.get_time() # returns the elapsed time, since init was called
glLoadIdentity()
#glScale(abs(sin(ct)),abs(sin(ct)),1)#glScale(abs(cos(ct)), abs(sin(ct)), 1)
#glRotatef(tan(ct)*45,0,0,1)#glRotatef(cos(ct) * 360, 0, 0, 1)# sin 45
#glTranslatef(sin(ct), cos(ct), 0)
glScale(abs(sin(ct)),abs(sin(ct)),1)
glRotatef((ct)*360,0,0,1)#glRotatef(tan(ct)*360,0,0,1)
glTranslatef(sin(ct), cos(ct), 0)
#glScale(abs(cos(ct)), abs(sin(ct)), 1)
#glRotatef(cos(ct) * 360, 0, 0, 1)
#glTranslatef(sin(ct), cos(ct), 0)
glDrawArrays(GL_TRIANGLES, 0, 3)
glfw.swap_buffers(window)
# terminate glfw, free up allocated resources
glfw.terminate()
| [
"noreply@github.com"
] | antonioam82.noreply@github.com |
2662a27b33bd4c9daf5148443daab0522772d756 | 1409a1ff20261b688d7c0971bd8d9e8e2b2394e1 | /自动调整图像边距, 调整 图例的位置/matplotlib_work_3_and_4.py | 677d66d6a336d9596ad5193a808cca5e8159643b | [] | no_license | crowluya/matplotlib_study | 75ca5a2314a433780179e3205896b698b97f2d5e | f3a9d6aace26eb056b6f7d86509108ce9a87df75 | refs/heads/master | 2020-03-25T02:17:17.781649 | 2018-08-03T12:21:24 | 2018-08-03T12:21:24 | 143,282,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,493 | py | import time
# 已完成
# 1.在X轴, y轴上根据传入的参数, 曲线数据
# 自动缩放图像的边距
# 没有用到缩放功能 不知道是不是老师想要的功能
# 2.lenged可以根据参数自由移动
def time_stamp():
    """Return the current local time formatted as 'YYYY/MM/DD HH:MM:SS'.

    Example: '2018/08/03 10:14:09'.
    """
    # strftime formats a struct_time directly; the original round-tripped
    # through time.localtime(int(time.time())) (equivalent, since struct_time
    # has one-second resolution) and shadowed the builtin name ``format``.
    return time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())
def log(*args, **kwargs):
    """Debugging helper: print any number of positional/keyword arguments.

    Format (and terminate) your strings yourself before passing them in.
    Usage: ``from utils import log``.
    """
    print(*args, **kwargs)
def log_by_time(*args, **kwargs):
    """Print a log line to the console prefixed with the current time.

    Usage: ``from utils import log_by_time as log_t``.
    """
    dt = time_stamp()
    print(dt, *args, **kwargs)
def log_to_file(*args, **kwargs):
    """Append a timestamped log line to ``log.file.txt`` (no console output).

    Usage: ``from utils import log_to_file as log_f``.
    """
    filename = 'log.file.txt'
    dt = time_stamp()
    # Created on first use if missing; otherwise the line is appended.
    with open(filename, 'a', encoding='utf-8') as f:
        print(dt, *args, file=f, **kwargs)
def log_with_file_and_console(*args, **kwargs):
    """Print a timestamped log line to the console AND append it to
    ``log.file.txt``.

    Usage: ``from utils import log_with_file_and_console as log_w``.
    """
    filename = 'log.file.txt'
    dt = time_stamp()
    print(dt, *args, **kwargs)
    # Created on first use if missing; otherwise the line is appended.
    with open(filename, 'a', encoding='utf-8') as f:
        print(dt, *args, file=f, **kwargs)
def test():
    """Exercise each logging helper once (manual smoke test)."""
    t1 = 0
    t2 = 1
    # t3 is an empty string; wrap possibly-empty values in parentheses when
    # formatting so the output stays readable: '({})'.format(t3)
    t3 = ''
    log("这是不带时间的log \n 自行美化化字符串".format(t1))
    log_by_time('这是带时间的log, 防止变量值为空, 输出时需要加括号,t3=({})'.format(t3))
    log_to_file('控制台不显示log输出 当前时间 log 到文件中, ', t1, t2)
    log_with_file_and_console('输出 当前时间 log 到文件中, 控制台显示t1={} t2={}'.format(t1, t2))
    pass
if __name__ == '__main__':
test()
| [
"s1107308633@gmail.com"
] | s1107308633@gmail.com |
3f4c5756beeb72ec71b76413a6f022b2120ea15a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2864/60697/295608.py | 753b8dcdb19704ec519b7e561cafcc0319699ca9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | size=int(input())
res=list(map(int,input().split(' ')))
tmp=[0 for i in range(10000)]
for num in res:
tmp[num]+=num
mx=max(res)
ans=[0 for i in range(mx+1)]
ans[1]=tmp[1]
for i in range(2,mx+1):
ans[i]=max(ans[i-1],ans[i-2]+tmp[i])
print(ans[mx])
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
5e60982973bc8c22885c25fb5098485a46ac955a | 990d36e9d501e2f18d071df7708f56400eddd48a | /migrations/0001_initial.py | 2fc77c04dd5e73305af426671bbc44250a9875c0 | [
"BSD-2-Clause"
] | permissive | andywar65/dxf2vr | e17b238efd0a25cdcd6a9a22ca2f60fe1d74d8aa | b51f8579b0e84c770f54c9d20b42cfe8d84a9031 | refs/heads/master | 2021-09-04T06:56:20.477939 | 2018-01-16T22:45:27 | 2018-01-16T22:45:27 | 103,447,216 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-18 22:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Initial migration for the dxf2vr app."""

    initial = True

    # BUG FIX: these dependencies were commented out, but the operations
    # below reference wagtailcore.Page, wagtaildocs.Document and
    # wagtailimages.Image — without the dependency edges Django cannot
    # resolve those models when building the migration graph.
    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('wagtailimages', '0019_delete_filter'),
        ('wagtaildocs', '0007_merge'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dxf2VrPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', models.CharField(blank=True, max_length=250, null=True)),
                ('dxf_file', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
                ('equirectangular_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='Dxf2VrPageMaterialImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('layer', models.CharField(max_length=250)),
                ('color', models.CharField(default='white', max_length=250)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='material_images', to='dxf2vr.Dxf2VrPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| [
"andy.war1965@gmail.com"
] | andy.war1965@gmail.com |
a6ba3b2c9d3f74fe814e17391bb71ce9a557b067 | 1e3ec0869e6b59ab8d5bf0ae76374a1971742705 | /code/paths.py | 59ca74174d635c7622010d39f1e78cbe5069f3b0 | [
"MIT"
] | permissive | TomDonoghue/SIMparam | 816ee1e19eb86c6ce4baa939ceb157585e0ae175 | 87433414a44cca2c7448c1d55d873dce9eed783d | refs/heads/main | 2023-03-16T19:18:42.661943 | 2021-03-20T00:37:12 | 2021-03-20T00:37:12 | 134,114,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | """Path and file information."""
###################################################################################################
###################################################################################################

# Project paths — NOTE(review): relative to the working directory of the
# caller (presumably scripts/notebooks one level below the repo root); confirm.
DATA_PATH = '../data/'
FIGS_PATH = '../figures/'

# Plot file settings
SAVE_EXT = '.pdf'  # extension used when saving figures
| [
"tdonoghue@ucsd.edu"
] | tdonoghue@ucsd.edu |
25779d5cf0aac0360e052323f14bdf6d7c06617b | 8b1945b9698fcba3ea409e838c3ed9b18bba05b1 | /day12/Django/web/settings.py | 514339c0f32a9afaffe42c79dc75cfbc3a28b443 | [
"Apache-2.0"
] | permissive | Vanessa-kriby/Python | 659a67c9c9b1a79490374c3991fcd63bfbde9d56 | 1fbef67852fb362712fc48fa5c3c29eac68fe202 | refs/heads/main | 2023-08-22T19:40:03.538141 | 2021-10-16T10:09:03 | 2021-10-16T10:30:15 | 417,788,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py | """
Django settings for web project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '29g%=by_ko-)@u+umst=^+^r(425vk=-l!dfh0)oey7(6sap5+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders', #新加的应用 pip install django-cors-headers
'app1',
'app2',
'app4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware', # 新加的插件 pip install django-cors-headers
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
# Database connection for the default MySQL backend.
# NOTE(review): credentials are hard-coded in version control — move them to
# environment variables or a secrets file before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # database engine
        'NAME': 'db1',  # database name
        'USER': 'root',  # account
        'PASSWORD': '12345678',  # password
        'HOST': '172.16.48.91',  # host address
        'PORT': 3306,  # BUG FIX: key was misspelled 'POST', so the port setting was silently ignored
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL=True # 新加的配置 pip install django-cors-headers | [
"1823954042@qq.com"
] | 1823954042@qq.com |
3c2836d1dbd22293cd7cecc302318f07c18122c0 | 09cd81a2d576fb2a01de69d3b5fec909567ba9ff | /apps/goods/admin.py | 8102237867f91749d452f907926f13a921b7d930 | [] | no_license | hua1054921935/dailyfresh | f80e0b7aea9ebcb214d89bb94a0b650aebeed47d | 75fa3bce313c8df8e948bf11d6e3b8faf2fabbee | refs/heads/master | 2021-09-05T12:03:38.997669 | 2018-01-27T08:27:27 | 2018-01-27T08:27:27 | 118,752,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | from django.contrib import admin
from django.core.cache import cache
from apps.goods.models import GoodsType, IndexGoodsBanner, IndexPromotionBanner, IndexTypeGoodsBanner
# Register your models here.
class BaseAdmin(admin.ModelAdmin):
    """ModelAdmin base class that regenerates the static index page and
    drops the cached index data whenever an instance is saved or deleted."""

    def _refresh_index(self):
        # Queue the celery task that rebuilds the static index page, then
        # invalidate the cached index context so stale data is not served.
        # Imported lazily, as in the original, to avoid import-time coupling.
        from celery_tasks.tasks import generate_static_index_html
        generate_static_index_html.delay()
        cache.delete('index_page_data')

    def save_model(self, request, obj, form, change):
        """Called by the admin site on create or update."""
        super().save_model(request, obj, form, change)
        print('发出generate_static_index_html任务')
        self._refresh_index()

    def delete_model(self, request, obj):
        """Called by the admin site on delete."""
        super().delete_model(request, obj)
        self._refresh_index()
class GoodsTypeAdmin(BaseAdmin):
    # Inherits the index-regeneration hooks from BaseAdmin unchanged.
    pass
class IndexGoodsBannerAdmin(BaseAdmin):
    # Inherits the index-regeneration hooks from BaseAdmin unchanged.
    pass
class IndexPromotionBannerAdmin(BaseAdmin):
    # Inherits the index-regeneration hooks from BaseAdmin unchanged.
    pass
class IndexTypeGoodsBannerAdmin(BaseAdmin):
    # Inherits the index-regeneration hooks from BaseAdmin unchanged.
    pass
# Register each model with its cache-invalidating admin class.
admin.site.register(GoodsType, GoodsTypeAdmin)
admin.site.register(IndexGoodsBanner, IndexGoodsBannerAdmin)
admin.site.register(IndexPromotionBanner, IndexPromotionBannerAdmin)
admin.site.register(IndexTypeGoodsBanner, IndexTypeGoodsBannerAdmin)
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
c362e8a68642bd5acaac31790d525a74e6934411 | 84721ed008c94f2c7351c63a6dd23d4107efcce0 | /vimlite/VimLite/FileEntry.py | 8c40ae8b61ee8c4b2f6c67fe245f76c84f0f5549 | [] | no_license | vim-scripts/VimLite | 17277b09a208d437d0b3a5e514b483bc5301ee4f | cd00cb17d98efed12e1e364dae8676101394f135 | refs/heads/master | 2016-09-07T18:50:25.672256 | 2013-01-25T00:00:00 | 2013-01-28T02:41:31 | 1,970,791 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
class FileEntry:
    """Record for one tagged file: database id, path, and the timestamp
    of the last retag pass."""

    def __init__(self):
        self.id = -1                    # database row id; -1 until assigned
        self.file = ''                  # file path
        self.lastRetaggedTimestamp = 0  # may be seeded with the current time

    def GetId(self):
        return self.id

    def SetId(self, id):
        self.id = id

    def GetFile(self):
        return self.file

    def SetFile(self, file):
        self.file = file

    def GetLastRetaggedTimestamp(self):
        return self.lastRetaggedTimestamp

    def SetLastRetaggedTimestamp(self, lastRetaggedTimestamp):
        self.lastRetaggedTimestamp = lastRetaggedTimestamp
| [
"scraper@vim-scripts.org"
] | scraper@vim-scripts.org |
d2254abcc7328d10d5d0563fd41ad5def662a6b1 | b4406e5753581699ed653609de0d5a85f8bbbfd0 | /PythonExamples/commandline.py | ab72b1076271965fa3b21561f00f1d3a6115678d | [] | no_license | jigarmistry/new | 1adf6ee47b0022d34b02cdf801852205c560f6ba | a20c44a1f6342c416b0d4ee78407244744969475 | refs/heads/master | 2016-09-05T20:52:11.766084 | 2014-10-14T16:18:56 | 2014-10-14T16:19:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | #------------------------------------------------------------------------------
# Name: commandline.py
# Author: Jigar Mistry
# Last Modified: 05/08/2013
# Description: This Python script demonstrates how to use
# command line arguments.
#------------------------------------------------------------------------------
import sys

# Usage string shown when the script is invoked without an argument.
usage = '[file name] [your argument]'

# Fixed: the original used Python-2-only `print` statements. Parenthesized
# single-argument prints behave identically under Python 2 and are valid
# Python 3, so the script now runs on both.
if len(sys.argv) < 2:
    print(usage)
else:
    # Echo the script name and the first user-supplied argument.
    print(sys.argv[0])
    print(sys.argv[1])
| [
"jigarmistry24@gmail.com"
] | jigarmistry24@gmail.com |
a71b34d59d892a2d7eb5c341e3d2f9cdd8dac544 | 7fb48e20234b9cb91b60a801d1b3ceec059632ff | /responses/registries.py | 049df23f459c605bcf419007636a14d01fc726b2 | [
"Apache-2.0"
] | permissive | timgates42/responses | bc7b2f9004abdcb1f385c8346c41e41acabeff0c | d49f82958887b0f55479714319df22a47c4fd3bc | refs/heads/master | 2023-03-16T15:43:44.565465 | 2022-07-15T14:05:50 | 2022-07-15T14:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,281 | py | import copy
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
from typing import Tuple
if TYPE_CHECKING: # pragma: no cover
# import only for linter run
from requests import PreparedRequest
from responses import BaseResponse
class FirstMatchRegistry(object):
    """Default registry: hands out the first registered response that
    matches an incoming request."""

    def __init__(self) -> None:
        # Responses are kept in registration order.
        self._responses: List["BaseResponse"] = []

    @property
    def registered(self) -> List["BaseResponse"]:
        return self._responses

    def reset(self) -> None:
        self._responses = []

    def find(
        self, request: "PreparedRequest"
    ) -> Tuple[Optional["BaseResponse"], List[str]]:
        """Return ``(matching response, reasons for every failed match)``."""
        first_index = None
        first_response = None
        failure_reasons = []
        for index, candidate in enumerate(self.registered):
            is_match, reason = candidate.matches(request)
            if not is_match:
                failure_reasons.append(reason)
                continue
            if first_index is None:
                # Remember the first hit and keep scanning for duplicates.
                first_index = index
                first_response = candidate
                continue
            # A second match exists.
            if self.registered[first_index].call_count > 0:
                # The first hit was already consumed (responses were added
                # between calls): drop it and hand out this later match.
                self.registered.pop(first_index)
                first_response = candidate
                break
            # Multiple unused matches: remove and return the first one.
            return self.registered.pop(first_index), failure_reasons
        return first_response, failure_reasons

    def add(self, response: "BaseResponse") -> "BaseResponse":
        if any(response is existing for existing in self.registered):
            # The identical object was registered before; store an
            # independent copy so per-response state is not shared.
            # See https://github.com/getsentry/responses/issues/479
            response = copy.deepcopy(response)
        self.registered.append(response)
        return response

    def remove(self, response: "BaseResponse") -> List["BaseResponse"]:
        """Remove every registered entry equal to ``response``."""
        removed = []
        while response in self.registered:
            self.registered.remove(response)
            removed.append(response)
        return removed

    def replace(self, response: "BaseResponse") -> "BaseResponse":
        """Swap the registered entry equal to ``response`` for ``response``."""
        try:
            position = self.registered.index(response)
        except ValueError:
            raise ValueError(
                "Response is not registered for URL {}".format(response.url)
            )
        self.registered[position] = response
        return response
class OrderedRegistry(FirstMatchRegistry):
    """Registry that requires requests to arrive in registration order."""

    def find(
        self, request: "PreparedRequest"
    ) -> Tuple[Optional["BaseResponse"], List[str]]:
        if not self.registered:
            return None, ["No more registered responses"]
        expected = self.registered.pop(0)
        is_match, reason = expected.matches(request)
        if is_match:
            return expected, []
        # Out-of-order request: keep only this response (reset + re-add)
        # and explain why it did not match.
        self.reset()
        self.add(expected)
        reason = (
            "Next 'Response' in the order doesn't match "
            f"due to the following reason: {reason}."
        )
        return None, [reason]
| [
"noreply@github.com"
] | timgates42.noreply@github.com |
a0d460e3fb827205f1c61de5d4183851f61d5dc8 | af8b929b1bd7375e8f7edde0bfc5680ca8508186 | /polls/urls.py | 2afbd6f6441deb1990e0a9ff3b7ea6692be26130 | [] | no_license | guptabhishek8/Django | 96be734879f166a415c0c213fabd234bc5fba757 | b9c66e0459d734b7bbdbd3dd8bb047816bb77105 | refs/heads/master | 2020-07-29T20:26:14.824416 | 2019-11-07T13:47:13 | 2019-11-07T13:47:13 | 209,947,850 | 0 | 1 | null | 2019-11-07T13:47:15 | 2019-09-21T08:05:56 | Python | UTF-8 | Python | false | false | 414 | py | from django.urls import path
from . import views
app_name = 'polls'
urlpatterns= [
path('',views.IndexView.as_view(), name='index'),
#ex: /polls/5/
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
# ex: /polls/5/results/
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
# ex: /polls/5/vote/
path('<int:question_id>/vote/', views.vote, name='vote'),
]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
1233c814266406e6176149d5ce8d20f3960e5f43 | d50685a3f3d612349b1f6627ed8b807f0eec3095 | /start/sina/raokouling.py | 51693160774fe2f1407e57171be5785beb72f26a | [] | no_license | Erich6917/python_littlespider | b312c5d018bce17d1c45769e59243c9490e46c63 | 062206f0858e797945ce50fb019a1dad200cccb4 | refs/heads/master | 2023-02-12T23:22:27.352262 | 2021-01-05T06:21:20 | 2021-01-05T06:21:20 | 113,631,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | # -*- coding: utf-8 -*-
# @Time : 2017/9/22
# @Author : LIYUAN134
# @File : raokouling.py
# @Commment: 绕口令录入
#
from bloom.sina.tools import sinaSoup as bSoup
from dbutil.DBNewsUtil import DBNews
BASEURL = 'https://raokouling.911cha.news/'
def rao_content():
url_list = []
soup = bSoup.soup_urlopen(BASEURL)
# other sports
titles = soup.select('.panel .mcon.bt.f14 ul li a')
if len(titles) > 0:
for ii in titles:
urls = BASEURL + ii.get('href')
# print ii.text, (BASEURL + ii.get('href'))
url_list.append(urls)
else:
print '未定位到[绕口令主页]'
return url_list
def rao_parse_main(url):
soup = bSoup.soup_urlopen(url)
dialog = soup.select('.mcon.f14.noi p')
msgarr = []
if len(dialog) > 0:
# print msg[0].text
for single in dialog:
msgarr.append(single.text)
# return msg[0].text
return "".join(msgarr)
else:
print "None"
return None
def rao_geturl_all():
url_list = []
for siz in range(5, 5):
url_list.extend(rao_content())
print '页面捕获完毕,共计收集到地址:FINAL:', len(url_list)
rdicts = {}
for url in url_list:
vmsg = rao_parse_main(url)
if vmsg is not None:
rdicts[url] = vmsg
else:
print 'URL为None 跳过'
print '结果集如下', len(rdicts)
for k, v in rdicts.items():
print k, v
return rdicts
def rao_telnet_main():
rdicts = rao_geturl_all()
scope = 'rap'
dbnews = DBNews()
dbnews.save_sina_telnet_result(rdicts, scope)
print '顺口溜收集完成!'
if __name__ == "__main__":
# rao_geturl_all()
rao_telnet_main()
# rao_geturl_all()
| [
"1065120559@qq.com"
] | 1065120559@qq.com |
5825f32bc9ab92ab51bcb044e7adc21e4246067e | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/gui/shared/gui_items/dossier/achievements/promisingfighterachievement.py | c3a044d4f3b6015c00d51f964e75f09ebee38268 | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 874 | py | # 2016.05.01 15:23:59 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/PromisingFighterAchievement.py
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK as _AB
from abstract import SimpleProgressAchievement
class PromisingFighterAchievement(SimpleProgressAchievement):
def __init__(self, dossier, value = None):
super(PromisingFighterAchievement, self).__init__('promisingFighterMedal', _AB.TEAM_7X7, dossier, value)
def _readProgressValue(self, dossier):
return dossier.getRecordValue(_AB.TEAM_7X7, 'promisingFighter')
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\gui_items\dossier\achievements\promisingfighterachievement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:23:59 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
a45a2b44496d0a7adc03eb50b3cb01f59635c7dd | 1aa5216e8ed1fc53999637a46c6af0716a8a8cdf | /disk_snapshot_service/anytree/node/node.py | 6e7c2a71fcd835412850016a835094973fd31354 | [] | no_license | ShawnYi5/DiskInProgress | c3a47fd5c52b1efeeaeee5b0de56626077a947a4 | b13d0cdcd0ab08b6dd5b106cda739d7c8ac9e41a | refs/heads/master | 2020-05-17T06:19:40.579905 | 2019-08-22T03:29:31 | 2019-08-22T03:29:31 | 183,555,060 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | # -*- coding: utf-8 -*-
from .nodemixin import NodeMixin
from .util import _repr
class Node(NodeMixin, object):
def __init__(self, name, parent=None, children=None, **kwargs):
u"""
A simple storage_tree node with a `name` and any `kwargs`.
The `parent` attribute refers the parent node:
>>> from anytree import Node, RenderTree
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0, foo=4, bar=109)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1, bar=8)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> print(RenderTree(root))
Node('/root')
├── Node('/root/sub0')
│ ├── Node('/root/sub0/sub0B', bar=109, foo=4)
│ └── Node('/root/sub0/sub0A')
└── Node('/root/sub1')
├── Node('/root/sub1/sub1A')
├── Node('/root/sub1/sub1B', bar=8)
└── Node('/root/sub1/sub1C')
└── Node('/root/sub1/sub1C/sub1Ca')
The same storage_tree can be constructed by using the `children` attribute:
>>> root = Node("root", children=[
... Node("sub0", children=[
... Node("sub0B", bar=109, foo=4),
... Node("sub0A", children=None),
... ]),
... Node("sub1", children=[
... Node("sub1A"),
... Node("sub1B", bar=8, children=[]),
... Node("sub1C", children=[
... Node("sub1Ca"),
... ]),
... ]),
... ])
>>> print(RenderTree(root))
Node('/root')
├── Node('/root/sub0')
│ ├── Node('/root/sub0/sub0B', bar=109, foo=4)
│ └── Node('/root/sub0/sub0A')
└── Node('/root/sub1')
├── Node('/root/sub1/sub1A')
├── Node('/root/sub1/sub1B', bar=8)
└── Node('/root/sub1/sub1C')
└── Node('/root/sub1/sub1C/sub1Ca')
"""
self.__dict__.update(kwargs)
self.name = name
self.parent = parent
if children:
self.children = children
def __repr__(self):
# 以path形式显示,类似于:/root/sub0/sub0A
args = ["%r" % self.separator.join([""] + [str(node.name) for node in self.path])]
return _repr(self, args=args, nameblacklist=["name"])
| [
"yi.shihong@aliyun.com"
] | yi.shihong@aliyun.com |
ef8bca0cbf358a97ba17980d28c11faa2f5d78ad | 620d21623a300821e2a195eed5434bac67fb4dca | /abb_experimental_ws/build/rosserial_arduino/catkin_generated/rosserial_arduino-extras.cmake.develspace.context.cmake.py | e5bc5d6d1a7bdbcec0598160d9eaa4a4e2118259 | [] | no_license | yazici/Robarch-dev | bb63c04bd2e62386e7c1215bf5b116ccd763c455 | 9f8db4c418db3fc80454200cb6625cc2b2151382 | refs/heads/master | 2020-05-21T02:32:06.765535 | 2018-09-07T15:50:26 | 2018-09-07T15:50:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | # generated from catkin/cmake/template/cfg-extras.context.py.in
DEVELSPACE = 'TRUE' == 'TRUE'
INSTALLSPACE = 'FALSE' == 'TRUE'
CATKIN_DEVEL_PREFIX = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/devel/.private/rosserial_arduino'
CATKIN_GLOBAL_BIN_DESTINATION = 'bin'
CATKIN_GLOBAL_ETC_DESTINATION = 'etc'
CATKIN_GLOBAL_INCLUDE_DESTINATION = 'include'
CATKIN_GLOBAL_LIB_DESTINATION = 'lib'
CATKIN_GLOBAL_LIBEXEC_DESTINATION = 'lib'
CATKIN_GLOBAL_PYTHON_DESTINATION = 'lib/python2.7/dist-packages'
CATKIN_GLOBAL_SHARE_DESTINATION = 'share'
CATKIN_PACKAGE_BIN_DESTINATION = 'lib/rosserial_arduino'
CATKIN_PACKAGE_ETC_DESTINATION = 'etc/rosserial_arduino'
CATKIN_PACKAGE_INCLUDE_DESTINATION = 'include/rosserial_arduino'
CATKIN_PACKAGE_LIB_DESTINATION = 'lib'
CATKIN_PACKAGE_LIBEXEC_DESTINATION = ''
CATKIN_PACKAGE_PYTHON_DESTINATION = 'lib/python2.7/dist-packages/rosserial_arduino'
CATKIN_PACKAGE_SHARE_DESTINATION = 'share/rosserial_arduino'
CMAKE_BINARY_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/build/rosserial_arduino'
CMAKE_CURRENT_BINARY_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/build/rosserial_arduino'
CMAKE_CURRENT_SOURCE_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/src/rosserial/rosserial_arduino'
CMAKE_INSTALL_PREFIX = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/install'
CMAKE_SOURCE_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/src/rosserial/rosserial_arduino'
PKG_CMAKE_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/devel/.private/rosserial_arduino/share/rosserial_arduino/cmake'
PROJECT_NAME = 'rosserial_arduino'
PROJECT_BINARY_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/build/rosserial_arduino'
PROJECT_SOURCE_DIR = '/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/src/rosserial/rosserial_arduino'
| [
"email.jrv@gmail.com"
] | email.jrv@gmail.com |
7fb3ec014ac72b8c0635580c6ecb27cd3a5b3da1 | f1d2073c23d247ea16ca4c696639a73f4eaa788c | /tests/p2p/test_kademlia_node_picklable.py | 661d478de63c9f2f0066df9fc8f97edf69b158ec | [
"MIT"
] | permissive | gengmoqi/trinity | 58edacd549667e7d0f5553278d784dacc1225dd9 | 2df1be3bc22b9d8de08711e5692a051d8dfc4d11 | refs/heads/master | 2022-11-26T22:06:18.314721 | 2020-07-22T22:16:47 | 2020-07-27T10:57:28 | 282,907,384 | 1 | 0 | MIT | 2020-07-27T13:32:28 | 2020-07-27T13:32:27 | null | UTF-8 | Python | false | false | 198 | py | import pickle
from p2p.tools.factories import NodeFactory
def test_kademlia_node_is_pickleable():
    """A kademlia node must survive a pickle round-trip unchanged."""
    original = NodeFactory()
    restored = pickle.loads(pickle.dumps(original))
    assert restored == original
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
36dcc87f8c06b8bbecf355de34ecf45ed0b067cf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02381/s038525069.py | 90ac1a1d44209344231a259d928a543e94878db6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import statistics
# Read datasets from stdin until a count of 0 terminates the input; for
# each dataset, print the population standard deviation of its scores.
while True:
    count = int(input())
    if count == 0:
        break
    scores = [float(token) for token in input().split()]
    print(statistics.pstdev(scores))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ac338c0520a732112f33179ca58a661fc0db6918 | e70a5960b60bf6c11df4248625d0188ededdd4c7 | /Experiment/Threads/multi_process_comu.py | 61a82bcc2e611520826d656a4bc605e1be54a654 | [] | no_license | wx2000qq/MoDeng | 70be2802b6191855667cce5fe3cd89afda5fb9a9 | 9144bb79c237c0361b40f314b2c3123d58ac71cc | refs/heads/master | 2020-11-24T11:52:08.829630 | 2019-12-15T04:54:25 | 2019-12-15T04:54:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # encoding=utf-8
import time
from multiprocessing import Process, Queue, Pool, Manager, Pipe
def producer(pipe):
    """Endlessly push the messages a1..a5 down the pipe, then block on a
    reply; a 'c' reply triggers a long pause."""
    while True:
        for message in ("a1", "a2", "a3", "a4", "a5"):
            pipe.send(message)
        print('发送了a\n')
        if pipe.recv() == 'c':
            time.sleep(30)
            print('收到c\n')
def consumer(pipe):
    """Endlessly send 'b', then report any received message containing 'a'."""
    while True:
        pipe.send("b")
        print('发送了b\n')
        received = pipe.recv()
        if 'a' in received:
            print('收到' + str(received))
if __name__ == "__main__":
    # A Pipe yields two connection endpoints used for inter-process messaging.
    s_pipe, r_pipe = Pipe()
    pool = Pool()
    # NOTE(review): Connection objects handed to apply_async must be
    # picklable by the pool's start method -- confirm this experiment
    # actually runs on the target platform.
    pool.apply_async(producer, args=(s_pipe,))
    pool.apply_async(consumer, args=(r_pipe,))
    pool.close()
    pool.join()
| [
"1210055099@qq.com"
] | 1210055099@qq.com |
050daa77920ef2a17b5a63bb00c545db14e6df14 | a23d449936da4d43f44272077c72fb08b81c6c23 | /src/python/dart/web/api/workflow.py | c06795e3891e8f1250ce4f44135417029840b714 | [
"MIT"
] | permissive | karthich/dart | f048786a4e47d40ab14c948956da12e0c80061c6 | 1681af243ceb35844447e5a1dc853e9ac9da4591 | refs/heads/master | 2021-01-18T16:42:37.208612 | 2016-03-10T21:09:22 | 2016-03-10T21:09:22 | 53,365,420 | 0 | 0 | null | 2016-03-07T23:00:15 | 2016-03-07T23:00:15 | null | UTF-8 | Python | false | false | 5,157 | py | import json
from flask import Blueprint, request, current_app
from flask.ext.jsontools import jsonapi
from dart.model.datastore import DatastoreState
from dart.model.query import Filter, Operator
from dart.model.workflow import Workflow, WorkflowState, WorkflowInstanceState
from dart.service.filter import FilterService
from dart.service.workflow import WorkflowService
from dart.service.trigger import TriggerService
from dart.web.api.entity_lookup import fetch_model
api_workflow_bp = Blueprint('api_workflow', __name__)
@api_workflow_bp.route('/datastore/<datastore>/workflow', methods=['POST'])
@fetch_model
@jsonapi
def post_workflow(datastore):
    """Create a workflow (from the request JSON) under the given datastore.

    :type datastore: dart.model.datastore.Datastore
    """
    workflow = Workflow.from_dict(request.get_json())
    # The workflow inherits its datastore binding and engine name.
    workflow.data.datastore_id = datastore.id
    workflow.data.engine_name = datastore.data.engine_name
    if datastore.data.state == DatastoreState.ACTIVE:
        # only templated datastores can use concurrencies > 1
        workflow.data.concurrency = 1
    workflow = workflow_service().save_workflow(workflow)
    return {'results': workflow.to_dict()}
@api_workflow_bp.route('/workflow', methods=['GET'])
@fetch_model
@jsonapi
def find_workflows():
    """List workflows, paginated and optionally filtered.

    Query params: ``limit`` (default 20), ``offset`` (default 0) and
    ``filters`` (a JSON-encoded list of filter strings).
    """
    limit = int(request.args.get('limit', 20))
    offset = int(request.args.get('offset', 0))
    raw_filters = json.loads(request.args.get('filters', '[]'))
    filters = [filter_service().from_string(raw) for raw in raw_filters]
    matching = workflow_service().query_workflows(filters, limit, offset)
    return {
        'results': [workflow.to_dict() for workflow in matching],
        'limit': limit,
        'offset': offset,
        'total': workflow_service().query_workflows_count(filters),
    }
@api_workflow_bp.route('/workflow/<workflow>', methods=['GET'])
@fetch_model
@jsonapi
def get_workflow(workflow):
    """Return one workflow (resolved from the URL by @fetch_model)."""
    return {'results': workflow.to_dict()}
@api_workflow_bp.route('/workflow/<workflow>/instance', methods=['GET'])
@fetch_model
@jsonapi
def find_workflow_instances(workflow):
    """List instances of one workflow; shares logic with /workflow/instance."""
    return _find_workflow_instances(workflow)
@api_workflow_bp.route('/workflow/instance/<workflow_instance>', methods=['GET'])
@fetch_model
@jsonapi
def get_workflow_instance(workflow_instance):
    """Return one workflow instance (resolved from the URL by @fetch_model)."""
    return {'results': workflow_instance.to_dict()}
@api_workflow_bp.route('/workflow/instance', methods=['GET'])
@fetch_model
@jsonapi
def find_instances():
    """List workflow instances across all workflows."""
    return _find_workflow_instances()
def _find_workflow_instances(workflow=None):
    """Shared pagination/filter logic for the instance-listing endpoints.

    When ``workflow`` is given, results are restricted to that workflow.
    """
    limit = int(request.args.get('limit', 20))
    offset = int(request.args.get('offset', 0))
    raw_filters = json.loads(request.args.get('filters', '[]'))
    filters = [filter_service().from_string(raw) for raw in raw_filters]
    if workflow:
        filters.append(Filter('workflow_id', Operator.EQ, workflow.id))
    instances = workflow_service().query_workflow_instances(filters, limit, offset)
    return {
        'results': [instance.to_dict() for instance in instances],
        'limit': limit,
        'offset': offset,
        'total': workflow_service().query_workflow_instances_count(filters),
    }
@api_workflow_bp.route('/workflow/<workflow>', methods=['PUT'])
@fetch_model
@jsonapi
def put_workflow(workflow):
    """Update a workflow's state; only ACTIVE/INACTIVE are accepted.

    :type workflow: dart.model.workflow.Workflow
    """
    updated_workflow = Workflow.from_dict(request.get_json())
    if updated_workflow.data.state not in [WorkflowState.ACTIVE, WorkflowState.INACTIVE]:
        # (body, status, headers) tuple -> HTTP 400
        return {'results': 'ERROR', 'error_message': 'state must be ACTIVE or INACTIVE'}, 400, None
    workflow = workflow_service().update_workflow_state(workflow, updated_workflow.data.state)
    return {'results': workflow.to_dict()}
@api_workflow_bp.route('/workflow/<workflow>/do-manual-trigger', methods=['POST'])
@fetch_model
@jsonapi
def trigger_workflow(workflow):
    """Manually fire an ACTIVE workflow, respecting its concurrency limit.

    :type workflow: dart.model.workflow.Workflow
    """
    wf = workflow
    if wf.data.state != WorkflowState.ACTIVE:
        return {'results': 'ERROR', 'error_message': 'This workflow is not ACTIVE'}, 400, None
    # Refuse to queue another run when QUEUED + RUNNING instances already
    # saturate the workflow's configured concurrency.
    states = [WorkflowInstanceState.QUEUED, WorkflowInstanceState.RUNNING]
    if workflow_service().find_workflow_instances_count(wf.id, states) >= wf.data.concurrency:
        return {'results': 'ERROR', 'error_message': 'Max concurrency reached: %s' % wf.data.concurrency}, 400, None
    # Fire-and-forget: the trigger is processed asynchronously.
    trigger_service().trigger_workflow_async(workflow.id)
    return {'results': 'OK'}
@api_workflow_bp.route('/workflow/<workflow>', methods=['DELETE'])
@fetch_model
@jsonapi
def delete_workflow(workflow):
    """Delete the given workflow via the workflow service."""
    workflow_service().delete_workflow(workflow.id)
    return {'results': 'OK'}
@api_workflow_bp.route('/workflow/<workflow>/instance', methods=['DELETE'])
@fetch_model
@jsonapi
def delete_workflow_instances(workflow):
    """Delete the given workflow's instances via the workflow service."""
    workflow_service().delete_workflow_instances(workflow.id)
    return {'results': 'OK'}
def filter_service():
    """Resolve the app-wide FilterService from the Flask app's dart context.

    :rtype: dart.service.filter.FilterService
    """
    return current_app.dart_context.get(FilterService)
def workflow_service():
    """Resolve the app-wide WorkflowService from the Flask app's dart context.

    :rtype: dart.service.workflow.WorkflowService
    """
    return current_app.dart_context.get(WorkflowService)
def trigger_service():
    """Resolve the app-wide TriggerService from the Flask app's dart context.

    :rtype: dart.service.trigger.TriggerService
    """
    return current_app.dart_context.get(TriggerService)
| [
"dmcpherson@rmn.com"
] | dmcpherson@rmn.com |
0ae7ea8ee2142871470e509ea76e48b4cc04ebb3 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2676/61519/321060.py | 13698a303e6e3d7dc416c42d1b0c28d9ef39ceb4 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | n=int(input())
for i in range(n):
num=list(input().split(" "))
m=int(num[0])
k=int(num[1])
number=list(input().split(" "))
for j in range(m):
number[j]=int(number[j])
res=0
tem=[]
for j in range(m-k+1):
res=0
for m in range(j,j+k):
res=res+number[m]
tem.append(res)
print(max(tem)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
de1c55849020f5aae1b29fe8dbf4e8e8ad5417b6 | e614c145ab902ebed09af2bcef5b36dca78a5787 | /sponsors/migrations/0004_sponsor_type.py | d67fcce5597c413d51ceb7787309719a638f43f8 | [] | no_license | rtreharne/pvsat-dev | 1646ca8f51bd466d659b25eb721750de8361ef02 | faa2b28250e2110f4603ffeff80ad0fedda1abbb | refs/heads/master | 2021-01-17T13:24:12.578341 | 2017-09-19T06:42:51 | 2017-09-19T06:42:51 | 44,095,813 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0003_sponsortype'),
]
operations = [
migrations.AddField(
model_name='sponsor',
name='type',
field=models.ForeignKey(default=99, to='sponsors.SponsorType'),
preserve_default=False,
),
]
| [
"R.Treharne@liverpool.ac.uk"
] | R.Treharne@liverpool.ac.uk |
bc53bc984713c48e894ffb39861d5e55a248274f | 2f3be02ae7aabbec751392df769f88f17220f7d5 | /PySetup.py | 65a18f1a5888061aee7f09ccd57b23603f01ea98 | [] | no_license | keul/Cheese-Boys | 730a3dc45f23390e31e802f7524eb1fbde73967f | 6a017bb3124415b67b55979c36edd24c865bda4d | refs/heads/master | 2021-06-01T16:47:37.728144 | 2016-10-22T15:45:38 | 2016-10-22T15:45:38 | 16,045,166 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | #! /usr/bin/env python
# Some of this script is from Pete Shinners pygame2exe script.
# (data copying and pygame icon changing) Special thanks to him!
# import modules
from distutils.core import setup
import sys, os, shutil, pygame
import py2exe
#########################
### Variables to edit ###
#########################
script = "cheeseboys.py" # Starting .py or .pyw script
dest_file = "cheeseboys" # Final name of .exe file
dest_folder = "bin" # Final folder for the files
icon_file = "data/images/cheese_icon.ico" # Icon file. Leave blank for the pygame icon.
extra_data = ["data","docs"] # Extra data to copy in the final folder
extra_modules = ['kezmenu','ktextsurfacewriter'] # Extra modules to be included in the .exe (leave blank if no extra modules)
dll_excludes = [] # excluded dlls ["w9xpopen.exe", "msvcr71.dll"]
# Stuff to show who made it, etc.
name = "Cheese Boys"
license = "GPL"
author = "Keul"
author_email = "lucafbb@gmail.com"
company = "Keul Software"
keywords = ['arcade', '2d', 'rpg', 'adventure', 'roleplaying', 'roguelike', 'engine', 'humor', 'keul', ]
description = u"A humor arcade and roleplaying game played in a future, post-apocalypse world"
from cheeseboys.cblocals import __version__ as version
##################################################################
### Below if you edit variables, you could mess up the program ###
##################################################################
# Default to "py2exe -q" when the script is invoked with no arguments.
if len(sys.argv) == 1:
    sys.argv.append("py2exe")
    sys.argv.append("-q")

# Fall back to the bundled pygame icon when no icon is designated.
# Fixed: the original compared with `icon_file is ''` -- identity of a
# string literal is implementation-defined (CPython warns on it); use
# equality instead. Behavior is unchanged for all values of icon_file.
if icon_file == '':
    path = os.path.split(pygame.__file__)[0]
    # NOTE(review): this sets `icon_resources`, but the Target below builds
    # its icon list from `icon_file`, so this fallback appears to have no
    # effect -- confirm the intended behavior before relying on it.
    icon_resources = '' + os.path.join(path, 'pygame.ico')
# Copy extra data files
def installfile(name):
    """Copy file or directory tree `name` into `dest_folder`.

    An existing directory copy is removed first (shutil.copytree requires
    a fresh target); a missing source only produces a warning.
    """
    dst = os.path.join(dest_folder)
    print 'copying', name, '->', dst
    if os.path.isdir(name):
        # Directories are mirrored under dst/name.
        dst = os.path.join(dst, name)
        if os.path.isdir(dst):
            shutil.rmtree(dst)
        shutil.copytree(name, dst)
    elif os.path.isfile(name):
        shutil.copy(name, dst)
    else:
        print 'Warning, %s not found' % name
##############################
### Distutils setup script ###
##############################
# Set some variables for the exe
class Target:
    """py2exe build target: stores arbitrary keyword options as attributes."""
    def __init__(self, **kw):
        self.__dict__.update(kw)
        # icon_file comes from the module-level configuration above.
        self.icon_file = icon_file
# Set some more variables for the exe
target = Target(
script = script,
icon_resources = [(1, icon_file)],
dest_base = dest_file,
company_name=company,
extra_modules = extra_modules
)
# Run the setup script!
setup(
options = {"py2exe": {"compressed": 1,
"optimize": 2,
"bundle_files": 1,
"dll_excludes": dll_excludes,
"dist_dir": dest_folder}},
zipfile = None,
windows = [target],
name = name,
description = description,
keywords = keywords,
author = author,
author_email = author_email,
version = version,
license = license,
)
# install extra data files
print '\n' # Just a space to make it look nicer :)
for d in extra_data:
installfile(d)
# If everything went okay, this should come up.
print '\n\n\nConversion successful!'
| [
"luca@keul.it"
] | luca@keul.it |
ce1e8f7ce71c3522f1f1a8d3541d41a0682730a2 | fc29ccdcf9983a54ae2bbcba3c994a77282ae52e | /Leetcode_By_Topic/tree-117.py | 6e829b905a75b63854f95fa34f8ef1da2d18e259 | [] | no_license | linnndachen/coding-practice | d0267b197d9789ab4bcfc9eec5fb09b14c24f882 | 5e77c3d7a0632882d16dd064f0aad2667237ef37 | refs/heads/master | 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | """
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
# O(n) of nodes doing width amount of work (width of each level)
# we can upper bound it to O(n) amount of work for n nodes
# O(n^2) will be loose upper bound, b2b compared this to merge sort
# which is O(nlogn)
class Solution:
    """Populate each node's ``next`` pointer with its right neighbour on
    the same level (works for non-perfect binary trees), using the
    already-linked current level to traverse -- O(1) extra space."""

    def findNext(self, childNode, prev, leftmost):
        # Append childNode to the linked list being built for the next
        # level. The first node encountered becomes that level's leftmost.
        # Returns the updated (prev, leftmost) pair.
        if not childNode:
            return prev, leftmost
        if prev is None:
            leftmost = childNode
        else:
            prev.next = childNode
        return childNode, leftmost

    def connect(self, root: 'Node') -> 'Node':
        """Wire the next pointers level by level and return the root."""
        if not root:
            return root
        level_head = root
        while level_head:
            node = level_head
            tail = None
            level_head = None  # first node discovered on the level below
            while node:
                tail, level_head = self.findNext(node.left, tail, level_head)
                tail, level_head = self.findNext(node.right, tail, level_head)
                node = node.next
        return root
"""
def connect(self, root: 'Node') -> 'Node':
if not root:
return root
queue = collections.deque([root])
while queue:
levelsize = len(queue)
for i in range(levelsize):
node = queue.popleft()
if i < levelsize - 1:
node.next = queue[0]
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return root
""" | [
"lchen.msc2019@ivey.ca"
] | lchen.msc2019@ivey.ca |
f09c8ad4ca6e819d7e5ccc3f91be4a226056e936 | c3fc0ac185ecdb467a959e79439b1adab53db089 | /code/chapter_11_example_10.py | 626ab691dbfd25c267242a5f80fe6e1f16d38cd8 | [] | no_license | twoscoops/two-scoops-of-django-1.11 | a3b037061ac48aaa1648af31d26bb5955af627af | 5eb68a22ca9c602b0042245f2c9f11a789377e04 | refs/heads/master | 2022-08-22T00:13:02.843450 | 2020-03-04T15:16:58 | 2020-03-04T15:16:58 | 72,587,543 | 478 | 93 | null | 2018-05-05T21:35:22 | 2016-11-02T00:06:10 | Python | UTF-8 | Python | false | false | 1,930 | py | """
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.11. Code samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial
code".
Permissions
============
In general, you may use the code we've provided with this book in your
programs and documentation. You do not need to contact us for permission
unless you're reproducing a significant portion of the code or using it in
commercial distributions. Examples:
* Writing a program that uses several chunks of code from this book does
  not require permission.
* Selling or distributing a digital package from material taken from this
book does require permission.
* Answering a question by citing this book and quoting example code does not
require permission.
* Incorporating a significant amount of example code from this book into your
product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.11, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2017 Two Scoops Press
(978-0-692-91572-1)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at info@twoscoopspress.org.
"""
# stores/forms.py
from django import forms
from .models import IceCreamStore
class IceCreamStoreUpdateForm(forms.ModelForm):
# Don't do this! Duplication of the model field!
phone = forms.CharField(required=True)
# Don't do this! Duplication of the model field!
description = forms.TextField(required=True)
class Meta:
model = IceCreamStore
| [
"pydanny@gmail.com"
] | pydanny@gmail.com |
d6f68ed295f962861ed279b48034cdbc57a4f28a | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/torch/storage.py | 68caff85a198c169fbe1eea40600ce0a9d841037 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 4,256 | py | import io
import torch
from ._utils import _type, _cuda
class _StorageBase(object):
is_cuda = False
is_sparse = False
def __str__(self):
content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
def __repr__(self):
return str(self)
def __iter__(self):
return iter(map(lambda i: self[i], range(self.size())))
def __copy__(self):
return self.clone()
def __deepcopy__(self, memo):
memo = memo.setdefault('torch', {})
if self._cdata in memo:
return memo[self._cdata]
new_storage = self.clone()
memo[self._cdata] = new_storage
return new_storage
def __reduce__(self):
b = io.BytesIO()
torch.save(self, b)
return (_load_from_bytes, (b.getvalue(),))
def __sizeof__(self):
return super(_StorageBase, self).__sizeof__() + self.element_size() * self.size()
def clone(self):
"""Returns a copy of this storage"""
device = self.get_device() if self.is_cuda else -1
with torch.cuda.device(device):
return type(self)(self.size()).copy_(self)
def tolist(self):
"""Returns a list containing the elements of this storage"""
return [v for v in self]
def cpu(self):
"""Returns a CPU copy of this storage if it's not already on the CPU"""
return self.type(getattr(torch, self.__class__.__name__))
def double(self):
"""Casts this storage to double type"""
return self.type(type(self).__module__ + '.DoubleStorage')
def float(self):
"""Casts this storage to float type"""
return self.type(type(self).__module__ + '.FloatStorage')
def half(self):
"""Casts this storage to half type"""
return self.type(type(self).__module__ + '.HalfStorage')
def long(self):
"""Casts this storage to long type"""
return self.type(type(self).__module__ + '.LongStorage')
def int(self):
"""Casts this storage to int type"""
return self.type(type(self).__module__ + '.IntStorage')
def short(self):
"""Casts this storage to short type"""
return self.type(type(self).__module__ + '.ShortStorage')
def char(self):
"""Casts this storage to char type"""
return self.type(type(self).__module__ + '.CharStorage')
def byte(self):
"""Casts this storage to byte type"""
return self.type(type(self).__module__ + '.ByteStorage')
def bool(self):
"""Casts this storage to bool type"""
return self.type(type(self).__module__ + '.BoolStorage')
def pin_memory(self):
"""Copies the storage to pinned memory, if it's not already pinned."""
if self.is_cuda:
raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
.format(self.type()))
import torch.cuda
allocator = torch.cuda._host_allocator()
return type(self)(self.size(), allocator=allocator).copy_(self)
def share_memory_(self):
"""Moves the storage to shared memory.
This is a no-op for storages already in shared memory and for CUDA
storages, which do not need to be moved for sharing across processes.
Storages in shared memory cannot be resized.
Returns: self
"""
from torch.multiprocessing import get_sharing_strategy
if self.is_cuda:
pass # CUDA doesn't use POSIX shared memory
elif get_sharing_strategy() == 'file_system':
self._share_filename_()
else:
self._share_fd_()
return self
@classmethod
def _new_shared(cls, size):
"""Creates a new storage in shared memory with the same data type"""
from torch.multiprocessing import get_sharing_strategy
if cls.is_cuda:
return cls(size)
elif get_sharing_strategy() == 'file_system':
return cls._new_using_filename(size)
else:
return cls._new_using_fd(size)
def _load_from_bytes(b):
return torch.load(io.BytesIO(b))
_StorageBase.type = _type
_StorageBase.cuda = _cuda
| [
"rnauhria@gmail.com"
] | rnauhria@gmail.com |
77f96b19fc82747d28dff9afb585067254969688 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/1-Python-Basics/28-tuples2_20200413204206.py | 283d9124f87eb737f8e962aacb7d776edb7b8de7 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #Tuples part 2
# my_tuple = (2, 1, 4, 10, 34, 11, 342, 3242, 9000)
# print(my_tuple)
# new_tuple = my_tuple[1:10]
# print(new_tuple)
# x,y,z, *other = (2, 1, 4, 10, 34, 11, 342, 3242, 9000)
# print (other)
one_tuple = (2, 1, 4, 10, 34, 11, 342, 3242, 9000)
#doesnt exist
# print(one_tuple.count(5))
# output is an error
#doesnt exist
# print(one_tuple.index(5))
# output is an error
print(one_tuple)
print(one_tuple.index(10))
print(one_tuple.count(11)) | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
2c12ba1cbb375896963b0fc5f7b9fd5bd7baf5b4 | 386601e50a618d5f8ecd339e32267f4b47ddf731 | /yo!dqn/record_env.py | c319ff3f7536b12b130ca9a7c9dda0c336bcce68 | [
"MIT"
] | permissive | afcarl/minecraft_rl_tutorial | 4c33fa174ae0af8ace69749ccfa6b3c5beb37506 | b4ed548bd826898bc7eb8a6a8ec986995a525bab | refs/heads/master | 2020-08-28T06:23:46.662072 | 2018-08-24T04:37:53 | 2018-08-24T04:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | import numpy as np
from minecraft_env import env
from utils.utils import *
from darknet import Darknet
import cv2
import torch.nn as nn
import pickle as pkl
import pandas as pd
CUDA = torch.cuda.is_available()
env = env.MinecraftEnv()
env.init(
allowContinuousMovement=["move", "turn"],
continuous_discrete=False,
videoResolution=[800, 600]
)
num_classes = 80
confidence = 0.5
nms_thesh = 0.4
classes = load_classes('data/coco.names')
print("Loading network.....")
model = Darknet("cfg/yolov3.cfg")
model.load_weights('save_model/yolov3.weights')
print("Network successfully loaded")
model.net_info["height"] = 416
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
video = cv2.VideoWriter('record/mob-fun.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 20, (800, 600))
done = False
write = False
batch_size = 1
for i in range(1):
score = 0
count = 0
env.reset()
while True:
count += 1
env.render(mode="rgb_array")
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
num_states = info['number_of_observations_since_last_state']
num_rewards = info['number_of_rewards_since_last_state']
observation = info['observation']
# print(num_states, num_rewards)
score += reward
obs = np.reshape(obs, (600, 800, 3))
img, origin_img, dim = prep_image(obs, inp_dim)
with torch.no_grad():
prediction = model(img, CUDA)
prediction = write_results(prediction, confidence, num_classes,
nms = True, nms_conf = nms_thesh)
# print('prediction: ', prediction)
if type(prediction) == int:
continue
output = prediction
i += 1
if CUDA:
torch.cuda.synchronize()
try:
output
except NameError:
print("No detections were made")
exit()
dim = (800, 600)
im_dim = torch.FloatTensor(dim)
scaling_factor = torch.min(inp_dim/im_dim)
output[:, [1,3]] -= (inp_dim - scaling_factor*im_dim[0])/2
output[:, [2,4]] -= (inp_dim - scaling_factor*im_dim[1])/2
output[:, 1:5] /= scaling_factor
output[:, [1,3]] = torch.clamp(output[:, [1,3]], 0.0, im_dim[0])
output[:, [2,4]] = torch.clamp(output[:, [2,4]], 0.0, im_dim[1])
colors = pkl.load(open("pallete", "rb"))
def write(x, result):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
img = result
cls = int(x[-1])
label = "{0}".format(classes[cls])
color = random.choice(colors)
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)
return img
list(map(lambda x: write(x, origin_img), output))
# result = write(output, origin_img)
det_name = 'det/det_' + str(count) + '.jpg'
cv2.imwrite(det_name, origin_img)
if done:
cv2.destroyAllWindows()
video.release()
print(str(i) + 'th episode score is ' + str(score))
break
env.close() | [
"dnddnjs11@naver.com"
] | dnddnjs11@naver.com |
9bf50ee5f5b2d550d5e60792dba2b5536fd60f1d | 16d8f47af768cc2859c4bb4f37ea4438088ebae2 | /features.py | 7bfee18013cae12feebbfd4c8c18c65f42b3e694 | [] | no_license | TaoTeChing/smart_deal_tool | f80db4a5f1b8f80c489dc5fc775d8bcd628d3092 | 3d6dce07eb174200fa3923b3145c407ff813a9da | refs/heads/master | 2020-03-26T14:52:08.909240 | 2018-08-16T09:51:45 | 2018-08-16T09:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | #-*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import const as ct
from qfq import qfq
from common import get_market_name
from cstock import CStock
def MACD(data, fastperiod=12, slowperiod=26, signalperiod=9):
ewma12 = data.ewm(fastperiod).mean()
ewma26 = data.ewm(slowperiod).mean()
dif = ewma12 - ewma26
dea = dif.ewm(signalperiod).mean()
bar = (dif - dea) #有些地方的bar = (dif-dea)*2,但是talib中MACD的计算是bar = (dif-dea) * 1
return dif, dea, bar
def VMACD(price, volume, fastperiod=12, slowperiod=26, signalperiod=9):
svolume = sum(volume)
vprice = np.array(price) * np.array(volume)
vprice = vprice / svolume
return MACD(pd.Series(vprice), fastperiod, slowperiod, signalperiod)
def MA(data, peried):
return data.rolling(peried).mean()
def VMA(price, volume, peried):
svolume = sum(volume)
vprice = np.array(price) * np.array(volume)
vprice = vprice / svolume
return MA(pd.Series(vprice), peried)
if __name__ == "__main__":
code = '601318'
prestr = "1" if get_market_name(code) == "sh" else "0"
cstock = CStock(ct.DB_INFO, code)
data = cstock.get_k_data()
filename = "%s%s.csv" % (prestr, code)
data = pd.read_csv("/data/tdx/history/days/%s" % filename, sep = ',')
data = data[['date', 'open', 'low', 'high', 'close', 'volume', 'amount']]
data = data.sort_index(ascending = False)
data = data.reset_index(drop = True)
info = pd.read_csv("/data/tdx/base/bonus.csv", sep = ',', dtype = {'code' : str, 'market': int, 'type': int, 'money': float, 'price': float, 'count': float, 'rate': float, 'date': int})
info = info[(info.code == code) & (info.type == 1)]
info = info.sort_index(ascending = False)
info = info.reset_index(drop = True)
info = info[['money', 'price', 'count', 'rate', 'date']]
data = qfq(data, code, info)
#print(MACD(data['close']))
#print(MA(data['close'], 5))
#VMA(data['close'], data['volume'], 5)
#print(VMACD(data['close'], data['volume']))
| [
"hellobiek@gmail.com"
] | hellobiek@gmail.com |
1c5d0f02b185d0581edcab13b6507ce31ec59ab2 | 11f54a9d392cdfc3b4cca689c0b5abdbf10625ff | /practice.py | f12791d25557a615fe1a300f32d067883245d8dc | [] | no_license | stheartsachu/Miscellinuous | aa0de96115bea73d49bed50f80e263f31cf9d9ad | 3063233669f7513166b2987e911d662a0fbad361 | refs/heads/master | 2021-01-05T03:51:15.168301 | 2020-02-16T10:13:42 | 2020-02-16T10:13:42 | 240,869,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | from turtle import*
from os import*
import turtle
import os
#background
a = turtle.Screen()
a.title('----GUARDIAN OF THE SPACE----')
a.bgcolor('black')
#border
border=turtle.Turtle()
border.speed(0)
border.color('white')
border.penup()
border.setposition(-300,-300)
border.pendown()
border.pensize(3)
for side in range(4):
border.fd(600)
border.lt(90)
border.hideturtle()
#Create the player
player=turtle.Turtle()
player.color('purple')
player.shape('triangle')
player.penup()
player.speed(0)
player.setposition(0,-250)
player.setheading(90)
playerspeed=15
#create left and right movement for the player
def move_left():
x=player.xcor()
x -= playerspeed
if x<-280:
x=-280
player.setx(x)
def move_right():
x=player.xcor()
x += playerspeed
if x>280:
x=280
player.setx(x)
a.listen()
a.onkey(move_left,"left")
a.onkey(move_right,"right")
| [
"seartsachu@gmail.com"
] | seartsachu@gmail.com |
6e85088a7d94fc2c1b1e925d94e47d373df734ac | afebbb07b2b4eada17a5853c1ce63b4075d280df | /marketsim/_pub/strategy/side/__init__.py | 044b7f60cf3121f60417a891549ffb18eeafb250 | [] | no_license | peter1000/marketsimulator | 8c0a55fc6408b880311d3ad49defc55e9af57824 | 1b677200a9d5323f2970c83f076c2b83d39d4fe6 | refs/heads/master | 2021-01-18T01:39:04.869755 | 2015-03-29T17:47:24 | 2015-03-29T17:47:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from marketsim.gen._out.strategy.side._timeframe import Timeframe
from marketsim.gen._out.strategy.side._threshold import Threshold
from marketsim.gen._out.strategy.side._fundamental_value import Fundamental_Value
from marketsim.gen._out.strategy.side._side_distribution import Side_distribution
from marketsim.gen._out.strategy.side._pairtrading import PairTrading
from marketsim.gen._out.strategy.side._signal import Signal
from marketsim.gen._out.strategy.side._crossingaverages import CrossingAverages
from marketsim.gen._out.strategy.side._book import book
from marketsim.gen._out.strategy.side._trendfollower import TrendFollower
from marketsim.gen._out.strategy.side._side import Side
from marketsim.gen._out.strategy.side._fundamentalvalue import FundamentalValue
from marketsim.gen._out.strategy.side._rsibis import RSIbis
from marketsim.gen._out.strategy.side._factor import Factor
from marketsim.gen._out.strategy.side._strategy import Strategy
from marketsim.gen._out.strategy.side._book import Book
from marketsim.gen._out.strategy.side._booktodependon import BookToDependOn
from marketsim.gen._out.strategy.side._meanreversion import MeanReversion
from marketsim.gen._out.strategy.side._alpha_2 import Alpha_2
from marketsim.gen._out.strategy.side._source import Source
from marketsim.gen._out.strategy.side._fv import Fv
from marketsim.gen._out.strategy.side._signal_value import Signal_Value
from marketsim.gen._out.strategy.side._alpha_1 import Alpha_1
from marketsim.gen._out.strategy.side._alpha import Alpha
from marketsim.gen._out.strategy.side._noise import Noise
| [
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
c517120b9792bcf792158e69978887dfc047a659 | 3e352bd226ddd11ef6dc3d0bf3507c83975bea98 | /BackendBaggie/products/migrations/0007_auto_20201117_1701.py | a1da464d71d21dbbaeefe699b0b519a4121f2a14 | [
"MIT"
] | permissive | Baggie-App/Updateapi | 221ec01cacddeb98cb148daa4495a5c1d44adbdb | 80f200d7ffd4695e6348ce6bb9a7a31a6b821e77 | refs/heads/main | 2023-01-21T09:33:43.199434 | 2020-12-03T18:01:22 | 2020-12-03T18:01:22 | 316,749,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # Generated by Django 3.1.1 on 2020-11-17 17:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('productsCategory', '0003_auto_20201112_1638'),
('products', '0006_auto_20201117_1651'),
]
operations = [
migrations.AlterField(
model_name='products',
name='productcategoryID',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='nested_products', to='productsCategory.productscategory'),
),
]
| [
"abdullah.cse11@gmail.com"
] | abdullah.cse11@gmail.com |
04b07627a2776dc9ee8438e45b75bb95848e560b | 3d705ec48c94373817e5f61d3f839988910431e3 | /utility/background_traffic/tcp_client.py | e9a87435c633b99a3183fa8904696eaa8f0271b0 | [] | no_license | namesuqi/zeus | 937d3a6849523ae931162cd02c5a09b7e37ebdd8 | 3445b59b29854b70f25da2950016f135aa2a5204 | refs/heads/master | 2022-07-24T14:42:28.600288 | 2018-03-29T08:03:09 | 2018-03-29T08:03:09 | 127,256,973 | 0 | 0 | null | 2022-07-07T22:57:57 | 2018-03-29T07:53:16 | Python | UTF-8 | Python | false | false | 1,028 | py | # coding=utf-8
"""
TODO: Add description for file
__author__ = 'zengyuetian'
"""
import socket
import time
one_k_data = "a"*200
def create_data(data_length):
block = one_k_data*data_length
return block
if __name__ == "__main__":
HOST = '192.168.1.26'
PORT = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
data1 = create_data(512)
data2 = create_data(256)
data3 = create_data(128)
data4 = create_data(64)
data5 = create_data(32)
data6 = create_data(32)
data_list = [data1, data2, data3, data4, data5, data6]
num = 0
while 1:
t1 = time.time()
for i in range(6):
s.send(data_list[i])
time.sleep(0.05)
t2 = time.time()
delta_t = t2 - t1
if 1-delta_t >= 0:
time.sleep(1-delta_t)
else:
if 1-delta_t < -0.05:
print "slow.......{0}".format(1-delta_t)
num += 1
print "Send {0}".format(num)
s.close() | [
"suqi_name@163.com"
] | suqi_name@163.com |
5c4e6283b3e3e7d0cc49c9e2beb0ac2d95bce6be | b669f3e420085593a307e2ae7fd1a44bc713bdf7 | /setup.py | 3622d67ae7d1fb5ed1b094e9ab90701f065a37a2 | [] | no_license | pylakey/openapi_client_generator | 3958636f4fc5de69f668a7d5f66ef3a9760f5237 | 0d7857b6c8966e752c08b88b9b7c62106bb52ae7 | refs/heads/master | 2023-07-17T20:33:31.458440 | 2021-08-29T20:22:33 | 2021-08-29T20:22:33 | 401,134,874 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import re
from setuptools import (
find_packages,
setup,
)
with open("README.md") as file:
read_me_description = file.read()
with open("requirements.txt") as r:
requirements = [i.strip() for i in r]
with open("openapi_client_generator/__init__.py", encoding="utf-8") as f:
version = re.findall(r"__version__ = \"(.+)\"", f.read())[0]
setup(
name="openapi_client_generator",
version=version,
license='MIT',
author="Pylakey",
author_email="pylakey@protonmail.com",
description="Async http client generator from OpenAPI schema",
long_description=read_me_description,
long_description_content_type="text/markdown",
packages=find_packages(),
entry_points = {
'console_scripts': ['openapi_client_generator=openapi_client_generator.cli:main'],
},
classifiers=[
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=requirements,
python_requires='>=3.9',
include_package_data=True,
)
| [
"pylakey@protonmail.com"
] | pylakey@protonmail.com |
d18face575b507d03542c06ea699f9c96b6cf1a6 | 18305efd1edeb68db69880e03411df37fc83b58b | /pdb_files_1000rot/kz/1kzn/tractability_500/pymol_results_file.py | d9e55e14dc2ce8dc061f1ee30ae02b3e09449bbc | [] | no_license | Cradoux/hotspot_pipline | 22e604974c8e38c9ffa979092267a77c6e1dc458 | 88f7fab8611ebf67334474c6e9ea8fc5e52d27da | refs/heads/master | 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,086 | py |
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
from chempy import cpv
radius, gap = float(radius), float(gap)
hlength, hradius = float(hlength), float(hradius)
try:
color1, color2 = color.split()
except:
color1 = color2 = color
color1 = list(cmd.get_color_tuple(color1))
color2 = list(cmd.get_color_tuple(color2))
def get_coord(v):
if not isinstance(v, str):
return v
if v.startswith('['):
return cmd.safe_list_eval(v)
return cmd.get_atom_coords(v)
xyz1 = get_coord(atom1)
xyz2 = get_coord(atom2)
normal = cpv.normalize(cpv.sub(xyz1, xyz2))
if hlength < 0:
hlength = radius * 3.0
if hradius < 0:
hradius = hlength * 0.6
if gap:
diff = cpv.scale(normal, gap)
xyz1 = cpv.sub(xyz1, diff)
xyz2 = cpv.add(xyz2, diff)
xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
for t in threshold_list:
for i in range(len(grids)):
try:
cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
except:
continue
try:
cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
except:
continue
for g in grids:
cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"12.7530002594":[], "12.7530002594_arrows":[]}
cluster_dict["12.7530002594"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(20.5), float(25.0), float(35.0), float(1.0)]
cluster_dict["12.7530002594_arrows"] += cgo_arrow([20.5,25.0,35.0], [21.768,22.624,36.253], color="blue red", name="Arrows_12.7530002594_1")
cluster_dict["12.7530002594"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(20.0), float(28.5), float(40.5), float(1.0)]
cluster_dict["12.7530002594_arrows"] += cgo_arrow([20.0,28.5,40.5], [21.479,28.092,43.236], color="blue red", name="Arrows_12.7530002594_2")
cluster_dict["12.7530002594"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(21.0), float(23.0), float(41.0), float(1.0)]
cluster_dict["12.7530002594_arrows"] += cgo_arrow([21.0,23.0,41.0], [22.957,21.706,41.506], color="blue red", name="Arrows_12.7530002594_3")
cluster_dict["12.7530002594"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(20.0313017836), float(25.928483719), float(38.4248553708), float(1.0)]
cluster_dict["12.7530002594"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(18.5), float(31.0), float(37.0), float(1.0)]
cluster_dict["12.7530002594_arrows"] += cgo_arrow([18.5,31.0,37.0], [15.894,30.15,38.902], color="red blue", name="Arrows_12.7530002594_4")
cluster_dict["12.7530002594"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(19.0), float(29.0), float(39.5), float(1.0)]
cluster_dict["12.7530002594_arrows"] += cgo_arrow([19.0,29.0,39.5], [16.893,28.666,41.538], color="red blue", name="Arrows_12.7530002594_5")
cmd.load_cgo(cluster_dict["12.7530002594"], "Features_12.7530002594", 1)
cmd.load_cgo(cluster_dict["12.7530002594_arrows"], "Arrows_12.7530002594")
cmd.set("transparency", 0.2,"Features_12.7530002594")
cmd.group("Pharmacophore_12.7530002594", members="Features_12.7530002594")
cmd.group("Pharmacophore_12.7530002594", members="Arrows_12.7530002594")
if dirpath:
f = join(dirpath, "label_threshold_12.7530002594.mol2")
else:
f = "label_threshold_12.7530002594.mol2"
cmd.load(f, 'label_threshold_12.7530002594')
cmd.hide('everything', 'label_threshold_12.7530002594')
cmd.label("label_threshold_12.7530002594", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_12.7530002594', members= 'label_threshold_12.7530002594')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
| [
"cradoux.cr@gmail.com"
] | cradoux.cr@gmail.com |
061ebf4227fd842db0abdec8e1c646e66276d95d | e238db1ae3e641d84af17e9cf6a881eb43b20039 | /pro58.py | b36e03f5fc9760c36b12a631ff910f21d58d593e | [] | no_license | RevathiRathi/Revat | 1aa478f51b147e7b044d7519f54938eda0149619 | 792757fb56243846e3049889bf502014c62d658e | refs/heads/master | 2020-04-15T05:01:36.873956 | 2019-07-18T13:40:34 | 2019-07-18T13:40:34 | 164,406,245 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #rr
n = int(input())
b = []
a = n//2 + n
for i in range(1,n+1):
if i%2==0:
b.append(i)
for i in range(1,n+1):
if i%2!=0:
b.append(i)
for i in range(1,n+1):
if i%2==0:
b.append(i)
print(a)
print(*b)
| [
"noreply@github.com"
] | RevathiRathi.noreply@github.com |
09d76f95d5566669c87bc451cb7dda7b960c8454 | 70376e75205771e32c67c38fcfed5406e9fca677 | /backend/src/processing/user/commit.py | 836cda4ddf9213816474e360c55a6b950adc46f7 | [
"MIT"
] | permissive | vctc/github-trends | 75fc229851febd17154ac99abbb708abd7794171 | e8bcc6ec8d2962fa85c0bd5a6db330adea3ea42a | refs/heads/main | 2023-09-05T06:42:02.502831 | 2021-10-27T06:22:39 | 2021-10-27T06:22:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | from time import sleep
from datetime import datetime
from typing import Any, Dict, List
from src.external.github_api.graphql.template import (
GraphQLErrorAuth,
GraphQLErrorTimeout,
GraphQLErrorMissingNode,
)
from src.external.github_api.rest.repo import get_repo_commits
from src.external.github_api.graphql.commit import get_commits
from src.constants import NODE_CHUNK_SIZE, CUTOFF, BLACKLIST
def get_all_commit_info(
user_id: str,
access_token: str,
name_with_owner: str,
start_date: datetime = datetime.now(),
end_date: datetime = datetime.now(),
) -> List[datetime]:
"""Gets all user's commit times for a given repository"""
owner, repo = name_with_owner.split("/")
data: List[Any] = []
index = 0
while index < 10 and len(data) == 100 * index:
data.extend(
get_repo_commits(
access_token=access_token,
owner=owner,
repo=repo,
user=user_id,
since=start_date,
until=end_date,
page=index + 1,
)
)
index += 1
data = list(
map(
lambda x: [
datetime.strptime(
x["commit"]["committer"]["date"], "%Y-%m-%dT%H:%M:%SZ"
),
x["node_id"],
],
data,
)
)
# sort ascending
data = sorted(data, key=lambda x: x[0])
return data
def _get_commits_languages(
access_token: str, node_ids: List[str], per_page: int = NODE_CHUNK_SIZE
) -> List[Dict[str, Any]]:
all_data: List[Dict[str, Any]] = []
i, retries = 0, 0
while i < len(node_ids):
cutoff = min(len(node_ids), i + per_page)
try:
if retries < 2:
all_data.extend(get_commits(access_token, node_ids[i:cutoff])) # type: ignore
else:
all_data.extend([{} for _ in range(cutoff - i)])
i, retries = i + per_page, 0
except GraphQLErrorMissingNode:
print("GraphQLErrorMissingNode, retrying...")
sleep(1)
retries += 1
except GraphQLErrorTimeout:
print("GraphQLErrorTimeout, retrying...")
sleep(1)
retries += 1
except GraphQLErrorAuth:
print("GraphQLErrorAuth, retrying...")
sleep(1)
retries += 1
return all_data
def get_commits_languages(
access_token: str,
node_ids: List[str],
commit_repos: List[str],
repo_infos: Dict[str, Any],
cutoff: int = CUTOFF,
):
all_data = _get_commits_languages(access_token, node_ids, per_page=NODE_CHUNK_SIZE)
out: List[Dict[str, Dict[str, int]]] = []
for commit, commit_repo in zip(all_data, commit_repos):
out.append({})
if (
"additions" in commit
and "deletions" in commit
and commit["additions"] + commit["deletions"] < cutoff
):
repo_info = repo_infos[commit_repo]["languages"]["edges"]
languages = [x for x in repo_info if x["node"]["name"] not in BLACKLIST]
total_repo_size = sum([language["size"] for language in languages])
for language in languages:
lang_name = language["node"]["name"]
lang_color = language["node"]["color"]
lang_size = language["size"]
additions = round(commit["additions"] * lang_size / total_repo_size)
deletions = round(commit["deletions"] * lang_size / total_repo_size)
if additions > 0 or deletions > 0:
out[-1][lang_name] = {
"additions": additions,
"deletions": deletions,
"color": lang_color,
}
return out
| [
"avgupta456@gmail.com"
] | avgupta456@gmail.com |
bd52e6c52ae21954c1b9a874771874a336d46e03 | 69df30e695e64e4713f4392021611095b4f4a360 | /regex.py | e621b3ea574ffbb1c7c8a2850d29fe356a52f6cf | [] | no_license | cos30degreees/MICH | 42a29ecd908d407b7a6a4ca31ffd6ac6f5a48488 | 46f2e754dbbe0a13ecfe087c727c30919aa98885 | refs/heads/master | 2022-11-29T17:30:26.270674 | 2020-08-16T19:30:54 | 2020-08-16T19:30:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | import os
import re
with open('regex_sum_42sample.txt','r') as file:
text = file.read()
print(text)
numbers = re.findall('[0-9]+',text)
| [
"noreply@github.com"
] | cos30degreees.noreply@github.com |
ac038f84fc0c3602d0031af1a93a7e2c1f9cfd54 | 876c58860fdb6c4d4edfcc4b56e97c2176b67f98 | /src/models/category.py | ee4c6720f2173f2024e12dccdf32e5a17d75be2b | [] | no_license | Otumian-empire/py-quiz | 6dbc7915fc0e22f8de1beb30f2071677b0dc413c | b4ee681837add64b039afda3dfc286995199288a | refs/heads/main | 2023-08-31T04:43:36.778235 | 2021-10-15T04:00:13 | 2021-10-15T04:00:13 | 349,536,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py | from settings import DatabaseConfig
from utils import placeholder_update
class Category(DatabaseConfig):
"""
>>> cat = Category()
Category Module interface - which inherites the DatabaseConfig class.
Category Table:
- id: int (PK)
- name: str
Methods:
- create
- update
- delete
Class:
- read
"""
def create(self, cat_name: str) -> bool:
"""
>>> Category().create(cat_name:str) -> bool
Create new category. Returns False on failure or when there is IntegrityError else True for success.
"""
sql = "INSERT INTO category (name) VALUES (?)"
values = [cat_name, ]
return self.write(sql, values)
class read(DatabaseConfig):
"""
>>> row = read().one(id: int) -> sqlite3.Row
>>> print(row['id'], row['name'])
Returns a sqlite3.Row passing an integer ID
>>> rows = read().all() -> list
>>> for row in rows:
>>> print(row['id'], row['name'])
Returns a list of sqlite3.Rows else and empty list
Methods:
- one
- all
"""
def one(self, id: int):
"""
>>> row = read().one(id: int) -> sqlite3.Row
>>> print(row['id'], row['name'])
Returns a sqlite3.Row passing an integer ID
"""
sql = " ".join(["SELECT id, name FROM category WHERE",
placeholder_update(["id"])])
values = [id, ]
cur = self.get_cursor()
row = cur.execute(sql, values).fetchone() or []
return row
def all(self):
"""
>>> rows = read().all() -> list
>>> for row in rows:
>>> print(row['id'], row['name'])
Returns a list of sqlite3.Rows else and empty list
"""
sql = "SELECT id, name FROM category"
cur = self.get_cursor()
row = cur.execute(sql).fetchall() or []
return row
def update(self, id: int, **kwargs) -> bool:
"""
>>> Category().update(id: int, **kwargs) -> bool
>>> Category().update(id: int, name: str = "some str") -> bool
Update category, where id is the category's ID. Returns False on failure else True for success.
"""
sql = " ".join(["UPDATE category SET",
placeholder_update(list(kwargs.keys())),
"WHERE",
placeholder_update(["id"])])
values = list(kwargs.values()) + [id]
return self.write(sql, values)
def delete(self, id: int) -> bool:
"""
>>> Category().delete(id: int) -> bool
Delete category by ID. Returns False on failure else True for success.
"""
sql = " ".join(["DELETE FROM category WHERE",
placeholder_update(["id"])])
values = [id, ]
return self.write(sql, values)
| [
"popecan1000@gmail.com"
] | popecan1000@gmail.com |
f9d4022df893e57e0153ddbe3cd0378bc978751d | 82d6e248d6498f53455f9ccb40b6ff9667da8f2e | /Pyspark/maxent_dp/join_op.py | a2fffa3fa8ff39596bd5c41c1f1a9096b6e327c4 | [] | no_license | marvinxu-free/data_analysis | 650ddf35443e66c395c8c503cacc328e547298a5 | 7a552959fd6272a54488c59091fa8b820c3f19ce | refs/heads/master | 2020-03-22T04:00:09.938423 | 2018-07-02T16:32:20 | 2018-07-02T16:32:20 | 139,466,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | # -*- coding: utf-8 -*-
# Project: local-spark
# Author: chaoxu create this file
# Time: 2017/11/17
# Company : Maxent
# Email: chao.xu@maxent-inc.com
"""
this filed used to do some join operation
"""
from pyspark.sql.functions import lit, col
def join_sub(df,sub,col_name):
df_new = df.join(sub,col_name,'inner')
return df_new
def join_state(df1,df2,col_name):
df2 = df2.withColumn("label", lit(1))
df_new = df1.join(df2, col_name, "left_outer")
return df_new
def join_loan(df1,df2,col_name='maxent_id'):
df_new = df1.join(df2, col_name, "left_outer")
return df_new
def join_leftanti(df1, df2, col_name):
df_new = df1.join(df2, col_name, "leftanti")
return df_new
join_inner = join_sub
join_left_outer = join_loan
| [
"marvinxu_free@163.com"
] | marvinxu_free@163.com |
25ac34fe6e24cc1af5fa7b545b57a8545dd68df2 | 54e187ce123daff920027560a83f5bb4f6bea602 | /20190505/模板代码/day4/lianxi04_cookie.py | 0ba76862157d5c6463a9f48940569bd1c6ea7fa1 | [] | no_license | GuoQin8284/PythonCode | 272b01d16ff508685b68c51b523a25e9753162ea | c4016a693c9ab4c798567aabf00135be10beae0c | refs/heads/master | 2023-05-06T03:45:55.667271 | 2021-05-27T15:29:28 | 2021-05-27T15:29:28 | 267,345,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # selenium webdriver
import time
# 0.导入自动化测试工具
from selenium import webdriver
# 1.打开浏览器
# 实例化浏览器驱动对象
# obj = 类名()
driver = webdriver.Chrome()
# 2.输入网址
# 驱动对象调用get("协议://URL")
driver.get("https://www.baidu.com")
# 3.业务操作
# 找到元素,操作元素
# 获取所有cookie
# 获取单个cookie
# 添加cookie
# 添加后查看对应cookie信息
# 4.关闭浏览器
# 驱动对象调用quit()
time.sleep(2)
driver.quit()
| [
"1143908462@qq.com"
] | 1143908462@qq.com |
651ca54f9042e9ff4f1987bdfa4f142368176daa | 2bbb3f0712ad531ec12a06febab13e7e5bdb13cf | /frappe/desk/doctype/module_onboarding/module_onboarding.py | 8315c0b3040982f15bd64d6636262028e95351c1 | [
"MIT"
] | permissive | Monogramm/frappe | b25a31e0ff71fff0acb8ad3eb07b216cb27c11dd | ce117d4aee8978b1231f1c0eca88bd69af9c9cf8 | refs/heads/develop | 2023-09-01T16:32:23.883839 | 2021-02-15T07:53:51 | 2021-02-15T07:53:51 | 191,976,487 | 2 | 0 | MIT | 2021-02-15T11:30:10 | 2019-06-14T16:41:36 | Python | UTF-8 | Python | false | false | 1,416 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
class ModuleOnboarding(Document):
def on_update(self):
if frappe.conf.developer_mode:
export_to_files(record_list=[['Module Onboarding', self.name]], record_module=self.module)
for step in self.steps:
export_to_files(record_list=[['Onboarding Step', step.step]], record_module=self.module)
def get_steps(self):
return [frappe.get_doc("Onboarding Step", step.step) for step in self.steps]
def get_allowed_roles(self):
all_roles = [role.role for role in self.allow_roles]
if "System Manager" not in all_roles:
all_roles.append("System Manager")
return all_roles
def check_completion(self):
if self.is_complete:
return True
steps = self.get_steps()
is_complete = [bool(step.is_complete or step.is_skipped) for step in steps]
if all(is_complete):
self.is_complete = True
self.save()
return True
return False
def before_export(self, doc):
doc.is_complete = 0
def reset_onboarding(self):
frappe.only_for("Administrator")
self.is_complete = 0
steps = self.get_steps()
for step in steps:
step.is_complete = 0
step.is_skipped = 0
step.save()
self.save()
| [
"scm.mymail@gmail.com"
] | scm.mymail@gmail.com |
852bc34b8e24393ca6c0af72937a595490de35c2 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/02_preprocessing/merraLagScripts/839-tideGauge.py | 77b0218c26a08c884592581356b84cecb680cb19 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020
****************************************************
Load predictors & predictands + predictor importance
****************************************************
@author: Michael Tadesse
"""
#import packages
import os
import pandas as pd
import datetime as dt #used for timedelta
from datetime import datetime
#define directories
# dir_name = 'F:\\01_erainterim\\03_eraint_lagged_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraAllCombined"
dir_out = "/lustre/fs0/home/mtadesse/merraAllLagged"
def lag():
os.chdir(dir_in)
#get names
tg_list_name = sorted(os.listdir())
x = 839
y = 840
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
pred = pd.read_csv(tg_name)
#create a daily time series - date_range
#get only the ymd of the start and end times
start_time = pred['date'][0].split(' ')[0]
end_time = pred['date'].iloc[-1].split(' ')[0]
print(start_time, ' - ', end_time, '\n')
date_range = pd.date_range(start_time, end_time, freq = 'D')
#defining time changing lambda functions
time_str = lambda x: str(x)
time_converted_str = pd.DataFrame(map(time_str, date_range), columns = ['date'])
time_converted_stamp = pd.DataFrame(date_range, columns = ['timestamp'])
"""
first prepare the six time lagging dataframes
then use the merge function to merge the original
predictor with the lagging dataframes
"""
#prepare lagged time series for time only
#note here that since MERRA has 3hrly data
#the lag_hrs is increased from 6(eraint) to 31(MERRA)
time_lagged = pd.DataFrame()
lag_hrs = list(range(0, 31))
for lag in lag_hrs:
lag_name = 'lag'+str(lag)
lam_delta = lambda x: str(x - dt.timedelta(hours = lag))
lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']), \
columns = [lag_name])
time_lagged = pd.concat([time_lagged, lag_new], axis = 1)
#datafrmae that contains all lagged time series (just time)
time_all = pd.concat([time_converted_str, time_lagged], axis = 1)
pred_lagged = pd.DataFrame()
for ii in range(1,time_all.shape[1]): #to loop through the lagged time series
print(time_all.columns[ii])
#extracting corresponding tag time series
lag_ts = pd.DataFrame(time_all.iloc[:,ii])
lag_ts.columns = ['date']
#merge the selected tlagged time with the predictor on = "date"
pred_new = pd.merge(pred, lag_ts, on = ['date'], how = 'right')
pred_new.drop('Unnamed: 0', axis = 1, inplace = True)
#sometimes nan values go to the bottom of the dataframe
#sort df by date -> reset the index -> remove old index
pred_new.sort_values(by = 'date', inplace=True)
pred_new.reset_index(inplace=True)
pred_new.drop('index', axis = 1, inplace= True)
#concatenate lagged dataframe
if ii == 1:
pred_lagged = pred_new
else:
pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:,1:]], axis = 1)
#cd to saving directory
os.chdir(dir_out)
pred_lagged.to_csv(tg_name)
os.chdir(dir_in)
#run script
lag()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
ed1b00d150c65432fbe9fe6a171e69b632c2fb65 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/419/usersdata/329/85402/submittedfiles/investimento.py | e6338b1e41b9399e4f47cc2637da3f2f3d6e440a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # -*- coding: utf-8 -*-
from __future__ import division
#COMECE SEU CODIGO AQUI
v = float(input("digite o valor do investimento inicial="))
i = float(input("digite o laor da taxa= "))
ano = 1
saldo = v
soma = 0
while ano <= 10:
rendimento = (i/100)*v
saldo = saldo+rendimento
print ('%.2f' % saldo)
ano = ano+1
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
1d024af436f8a98066a335089d60efda2a502320 | f44e4485385296f4d1de2032c64c76de37ec5007 | /tests/airplay/test_airplay.py | 26fbebdb29f519b9d33ab0264ccb007c11b80228 | [
"MIT"
] | permissive | kdschlosser/pyatv | 370d0a35e39623b8e8e6a087c675ec47aa50fb16 | fa32dab9ad3c4adffdc944ed78427f6c724074f5 | refs/heads/master | 2022-06-20T06:58:13.608441 | 2020-05-11T04:57:55 | 2020-05-11T06:22:23 | 264,143,600 | 1 | 0 | MIT | 2020-05-15T08:48:06 | 2020-05-15T08:48:05 | null | UTF-8 | Python | false | false | 2,889 | py | """Functional tests for Airplay."""
from aiohttp import ClientSession
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from pyatv import exceptions, net
from pyatv.const import Protocol
from pyatv.airplay import player
from tests.fake_device import FakeAppleTV
STREAM = "http://airplaystream"
START_POSITION = 0.8
class AirPlayPlayerTest(AioHTTPTestCase):
async def setUpAsync(self):
await AioHTTPTestCase.setUpAsync(self)
# This is a hack that overrides asyncio.sleep to avoid making the test
# slow. It also counts number of calls, since this is quite important
# to the general function.
player.asyncio.sleep = self.fake_asyncio_sleep
self.no_of_sleeps = 0
self.session = ClientSession(loop=self.loop)
http = net.HttpSession(
self.session, "http://127.0.0.1:{0}/".format(self.server.port)
)
self.player = player.AirPlayPlayer(self.loop, http)
async def tearDownAsync(self):
await self.session.close()
await AioHTTPTestCase.tearDownAsync(self)
async def get_application(self, loop=None):
self.fake_atv = FakeAppleTV(self.loop)
self.state, self.usecase = self.fake_atv.add_service(Protocol.AirPlay)
return self.fake_atv.app
async def fake_asyncio_sleep(self, time, loop=None):
self.no_of_sleeps += 1
@unittest_run_loop
async def test_play_video(self):
self.usecase.airplay_playback_idle()
self.usecase.airplay_playback_playing()
self.usecase.airplay_playback_idle()
await self.player.play_url(STREAM, position=START_POSITION)
self.assertEqual(self.state.last_airplay_url, STREAM)
self.assertEqual(self.state.last_airplay_start, START_POSITION)
self.assertIsNotNone(self.state.last_airplay_uuid)
self.assertEqual(self.no_of_sleeps, 2) # playback + idle = 3
@unittest_run_loop
async def test_play_video_no_permission(self):
self.usecase.airplay_playback_playing_no_permission()
with self.assertRaises(exceptions.NoCredentialsError):
await self.player.play_url(STREAM, position=START_POSITION)
@unittest_run_loop
async def test_play_with_retries(self):
self.usecase.airplay_play_failure(2)
self.usecase.airplay_playback_playing()
self.usecase.airplay_playback_idle()
await self.player.play_url(STREAM, position=START_POSITION)
self.assertEqual(self.state.play_count, 3) # Two retries + success
@unittest_run_loop
async def test_play_with_too_many_retries(self):
self.usecase.airplay_play_failure(10)
self.usecase.airplay_playback_playing()
self.usecase.airplay_playback_idle()
with self.assertRaises(exceptions.PlaybackError):
await self.player.play_url(STREAM, position=START_POSITION)
| [
"pierre.staahl@gmail.com"
] | pierre.staahl@gmail.com |
352868c35b0f4982843d580053421a28ff961c40 | 065e4cdb3b79c3697f323cbc3d29a79ca696b47f | /src/stomp/transport/subscriptions.py | aa0c30f75b5f211868eba9987551d757d547da88 | [] | no_license | sousouindustries/python-stomp | 59aaa47884013ebdc3bfb6c7f4756ef3ee03547e | b2de7aa2f1658eaa49bffd977bd1c9630ef58f0c | refs/heads/master | 2021-01-10T03:35:04.103347 | 2016-01-08T16:35:10 | 2016-01-08T16:35:10 | 44,618,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,201 | py | import collections
import logging
import threading
try:
import queue
except ImportError:
import Queue as queue
from stomp.const import MESSAGE
from stomp.const import HDR_ID
from stomp.const import HDR_SUBSCRIPTION
from stomp.frames import UnsubscribeFrame
from stomp.transport.message import Message
class SubscriptionManager(object):
"""Manages subscriptions for a session with a ``STOMP`` server."""
def __init__(self, session, connection, message_factory=None):
self.session = session
self.connection = connection
connection.register_observer(self)
self.subscriptions = collections.OrderedDict()
self.logger = logging.getLogger('stomp.session')
self.message_factory = message_factory or Message.fromframe
def add(self, sid, destinations):
"""Register a new subscription."""
assert sid not in self.subscriptions
self.subscriptions[sid] = Subscription(
self, sid, destinations)
return self.subscriptions[sid]
def notify(self, event, frame, *args, **kwargs):
c = self.connection
if event == c.EVNT_FRAME_RECV and frame.is_message():
assert frame.command == MESSAGE
assert frame.has_header(HDR_SUBSCRIPTION)
assert frame.headers[HDR_SUBSCRIPTION] in self.subscriptions
sid = frame.headers[HDR_SUBSCRIPTION]
self.subscriptions[sid].put(
self.message_factory(self.connection, self, frame)
)
raise c.DiscardFrame
# if event == c.EVNT_RECONNECT:
# # On reconnect, re-subscribe for all subscriptions.
# for sub in self.subscriptions.values():
# sub.create(self.session.subscribe)
def destroy(self, sid):
"""Destroy the :class:`Subscription` identified by `sid` and stop
receiving messages for it.
"""
if sid in self.subscriptions:
sub = self.subscriptions.pop(sid)
self.connection.send_frame(sub.unsubscribe_frame)
def __iter__(self):
return iter(self.subscriptions.values())
class Subscription(object):
"""Represents a subscription to one or more channels."""
@property
def unsubscribe_frame(self):
frame = UnsubscribeFrame(with_receipt=True)
frame.set_header(HDR_ID, self.sid)
return frame
@property
def message_count(self):
return self._messages_received
@property
def frame_count(self):
return self._frame_count
@property
def messages(self):
"""Return all messages received by this subscription."""
while True:
try:
yield self.queue.get(False)
except queue.Empty:
break
else:
self.queue.task_done()
def __init__(self, manager, sid, destinations):
self.manager = manager
self.sid = sid
self.destinations = destinations
self.queue = queue.Queue()
self.seen = collections.deque([], 1000)
self._messages_received = 0
self._frame_count = 0
self._events = collections.OrderedDict()
def put(self, msg):
self._frame_count += 1
if msg.mid not in self.seen:
self._messages_received += 1
self.seen.append(msg.mid)
self.queue.put(msg)
if self._events:
self._events.pop(list(self._events.keys())[0]).set()
def wait(self, timeout=None):
"""Block until a message has been received on this :class:`Subscription`.
Return a boolean indicating if a message was received during the specified
timeframe `timeout`.
"""
t = threading.current_thread()
if timeout is not None:
timeout = int(timeout / 1000)
self._events[t.ident] = threading.Event()
return self._events[t.ident].wait(timeout)
def destroy(self):
"""Stop receiving frames for this :class:`Subscription` and remove
it from the registry.
"""
return self.manager.destroy(self.sid)
def __iter__(self):
return iter(self.messages)
| [
"cochiseruhulessin@gmail.com"
] | cochiseruhulessin@gmail.com |
531cf00edb87316c424d0c0facc6fbadc234a621 | 1e0b77feea4aa08f2aa9ff63feddbc818428a350 | /script/s-cms/scms_download.py | 30b434d64baf717d560ff23648b74db390efed43 | [] | no_license | cleanmgr112/Tentacle | 838b915430166429da3fe4ed290bef85d793fae4 | 175e143fc08d1a6884a126b7da019ef126e116fa | refs/heads/master | 2022-12-08T06:36:28.706843 | 2020-08-26T14:06:35 | 2020-08-26T14:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: 'orleven'
from lib.utils.connect import ClientSession
from lib.core.enums import VUL_LEVEL
from lib.core.enums import VUL_TYPE
from lib.core.enums import SERVICE_PORT_MAP
from script import Script
class POC(Script):
def __init__(self, target=None):
self.service_type = SERVICE_PORT_MAP.WEB
self.name = 's-cms download'
self.keyword = ['s-cms', 'download']
self.info = 's-cms download'
self.type = VUL_TYPE.INFO
self.level = VUL_LEVEL.HIGH
self.refer = 'https://xz.aliyun.com/t/3614'
Script.__init__(self, target=target, service_type=self.service_type)
async def prove(self):
await self.get_url()
if self.base_url:
headers = {
"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.4 (KHTML, like Gecko) Chrome/6.0.481.0 Safari/534.4",
"Cookie": "user=%;pass=%;"
}
path_list = list(set([
self.url_normpath(self.base_url, '/'),
self.url_normpath(self.url, './'),
]))
async with ClientSession() as session:
for path in path_list:
poc = "index.php"
url = path +"admin/download.php?DownName=%s" % poc.replace("h","H")
async with session.get(url=url, headers = headers) as res:
if res != None:
text = await res.text()
if '<?php' in text:
self.flag = 1
self.req.append({"url": url})
self.res.append({"info": url, "key": "s-cms download", 'connect': text})
return | [
"546577246@qq.com"
] | 546577246@qq.com |
0da74966eb6f0978a5e9d2a529d43f64c388c38c | f45cc0049cd6c3a2b25de0e9bbc80c25c113a356 | /LeetCode/深度优先搜索(dfs)/岛屿问题/695. Max Area of Island.py | 742aa29befb5e3615195b2b6f1f07a715554183a | [] | no_license | yiming1012/MyLeetCode | 4a387d024969bfd1cdccd4f581051a6e4104891a | e43ee86c5a8cdb808da09b4b6138e10275abadb5 | refs/heads/master | 2023-06-17T06:43:13.854862 | 2021-07-15T08:54:07 | 2021-07-15T08:54:07 | 261,663,876 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,497 | py | '''
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's (representing land) connected 4-directionally (horizontal or vertical.) You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array. (If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11, because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/max-area-of-island
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
from typing import List
class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
"""
思路:dfs
1. 如果某一个点为1,则搜索该点周围四个方向的点,使得为1的点形成一个连通区域,并将访问过的1赋值为2
2. 统计每个连通区域1的个数,并计算最大值
@param grid:
@return:
"""
if not grid:
return 0
m, n = len(grid), len(grid[0])
pos = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def dfs(i, j):
grid[i][j] = 2
ans = 1
for x, y in pos:
a, b = i + x, j + y
if 0 <= a < m and 0 <= b < n and grid[a][b] == 1:
ans += dfs(a, b)
return ans
res = 0
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
res = max(res, dfs(i, j))
return res
if __name__ == '__main__':
grid = [[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]]
print(Solution().maxAreaOfIsland(grid))
| [
"1129079384@qq.com"
] | 1129079384@qq.com |
e46753e73303275971e86bbddc696c0a2ccb0653 | 3c5c4c4fb296d08e9e984c4a60ae4fa147293e9a | /ceres/wallet/wallet_sync_store.py | 3e8fcb3e7420d9fc7493536b0253e54a445194da | [
"Apache-2.0"
] | permissive | signingup/ceres-combineharvester | a8874ab11145e7ba2223b85483b96dea01054ad0 | aad918a03a4a522e0e2f3bac104d19d693d6bf79 | refs/heads/main | 2023-07-25T04:11:13.765471 | 2021-09-09T14:59:48 | 2021-09-09T14:59:48 | 404,918,382 | 1 | 0 | Apache-2.0 | 2021-09-10T01:22:20 | 2021-09-10T01:22:20 | null | UTF-8 | Python | false | false | 3,201 | py | import asyncio
import logging
from typing import Dict, List, Optional, Tuple
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.header_block import HeaderBlock
from ceres.util.ints import uint32
log = logging.getLogger(__name__)
class WalletSyncStore:
# Whether or not we are syncing
sync_mode: bool
# Whether we are waiting for peaks (at the start of sync) or already syncing
waiting_for_peaks: bool
# Potential new peaks that we have received from others.
potential_peaks: Dict[bytes32, HeaderBlock]
# Blocks received from other peers during sync
potential_blocks: Dict[uint32, HeaderBlock]
# Event to signal when blocks are received at each height
potential_blocks_received: Dict[uint32, asyncio.Event]
# Blocks that we have finalized during sync, queue them up for adding after sync is done
potential_future_blocks: List[HeaderBlock]
# A map from height to header hash of blocks added to the chain
header_hashes_added: Dict[uint32, bytes32]
# map from potential peak to fork point
peak_fork_point: Dict[bytes32, uint32]
@classmethod
async def create(cls) -> "WalletSyncStore":
self = cls()
self.sync_mode = False
self.waiting_for_peaks = True
self.potential_peaks = {}
self.potential_blocks = {}
self.potential_blocks_received = {}
self.potential_future_blocks = []
self.header_hashes_added = {}
self.peak_fork_point = {}
return self
def set_sync_mode(self, sync_mode: bool) -> None:
self.sync_mode = sync_mode
def get_sync_mode(self) -> bool:
return self.sync_mode
async def clear_sync_info(self):
self.potential_peaks.clear()
self.potential_blocks.clear()
self.potential_blocks_received.clear()
self.potential_future_blocks.clear()
self.header_hashes_added.clear()
self.waiting_for_peaks = True
self.peak_fork_point = {}
def get_potential_peaks_tuples(self) -> List[Tuple[bytes32, HeaderBlock]]:
return list(self.potential_peaks.items())
def add_potential_peak(self, block: HeaderBlock) -> None:
self.potential_peaks[block.header_hash] = block
def add_potential_fork_point(self, peak_hash: bytes32, fork_point: uint32):
self.peak_fork_point[peak_hash] = fork_point
def get_potential_fork_point(self, peak_hash) -> Optional[uint32]:
if peak_hash in self.peak_fork_point:
return self.peak_fork_point[peak_hash]
else:
return None
def get_potential_peak(self, header_hash: bytes32) -> Optional[HeaderBlock]:
return self.potential_peaks.get(header_hash, None)
def add_potential_future_block(self, block: HeaderBlock):
self.potential_future_blocks.append(block)
def get_potential_future_blocks(self):
return self.potential_future_blocks
def add_header_hashes_added(self, height: uint32, header_hash: bytes32):
self.header_hashes_added[height] = header_hash
def get_header_hashes_added(self, height: uint32) -> Optional[bytes32]:
return self.header_hashes_added.get(height, None)
| [
"hulatang_eric@163.com"
] | hulatang_eric@163.com |
8a8ad854276f8112b4b267ecf2dc53a3ca6e4eea | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/579074_Consumer_Application_Skeleton/recipe-579074.py | 819580f02338acb4cb42b0dbf1adbe8177894293 | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 7,668 | py | """This is very basic skeleton for data processing application implementing
consumer pattern.
"""
__author__ = 'vovanec@gmail.com'
from concurrent.futures import ThreadPoolExecutor
import functools
import logging
import signal
import threading
DEFAULT_NUM_WORKERS = 16
class ConsumerAppBase(object):
"""Base class for task consumer application.
"""
sleep_timeout = 3
def __init__(self, app_name, num_workers=DEFAULT_NUM_WORKERS):
"""Constructor.
:param str app_name: application name.
:param int num_workers: number of worker threads.
"""
self._log = logging.getLogger(app_name)
self._app_name = app_name
self._stop_event = threading.Event()
self._task_executor = ThreadPoolExecutor(max_workers=num_workers)
self._task_limiter = threading.Semaphore(value=num_workers)
def run(self):
"""Run application.
"""
exit_status = 0
self._install_signal_handlers()
try:
self._on_start()
self._main_loop()
except BaseException as exc:
self._log.exception('Unrecoverable exception in %s main loop. '
'Exiting: %s', self._app_name, exc)
exit_status = 1
finally:
self._stop_event.set()
self._stop_task_executor()
self._on_stop()
self._log.info('Done.')
return exit_status
def stop(self):
"""Tell the main loop to stop and shutdown.
"""
self._stop_event.set()
def _get_next_task(self):
"""Get next task for processing. Subclasses MUST implement this method.
:return: next task object. If None returned - framework assumes the
input queue is empty and waits for TaskProcessor.sleep_timeout
time before calling to _get_next_task() again.
:rtype: object|None
"""
raise NotImplementedError
def _run_task(self, task):
"""Run task in separate worker thread of ThreadPoolExecutor.
Subclasses MUST implement this method.
:param object task: task item.
:rtype: object|None
:return: this method should return task execution result, that will be
available in _on_task_done() callback wrapped in
concurrent.futures.Future object.
"""
raise NotImplementedError
def _on_task_done(self, task, future):
"""This callback is being called after task finished.
Subclasses may implement this method to handle tasks results,
perform some cleanup etc.
:param object task: task item.
:param concurrent.futures.Future future: future that wraps
task execution result.
:rtype: None
"""
pass
def _on_start(self):
"""Subclasses may re-implement this method to add custom logic on
application start, right before entering the main loop.
"""
pass
def _on_stop(self):
"""Subclasses may re-implement this method to add custom logic on
application stop, right after exiting the main loop.
"""
pass
# Private methods
def _install_signal_handlers(self):
"""Install signal handlers for the process.
"""
self._log.info('Installing signal handlers')
def handler(signum, _):
"""Signal handler.
"""
self._log.info('Got signal %s', signum)
self._stop_event.set()
for sig in (signal.SIGHUP, signal.SIGINT, signal.SIGTERM,
signal.SIGQUIT, signal.SIGABRT):
signal.signal(sig, handler)
def _main_loop(self):
"""Main loop.
"""
self._log.info('Entering the main loop')
while not self._stop_event.is_set():
# Try to get the next task. If exception occurred - wait and retry.
try:
task = self._get_next_task()
except Exception as exc:
self._log.exception(
'Failed to get next task for processing: %s. Sleeping '
'for %s seconds before retry.', exc, self.sleep_timeout)
self._stop_event.wait(self.sleep_timeout)
continue
# If task is None - wait and retry to get the next task.
if task is None:
self._log.info(
'Task queue is empty. Sleeping for %s seconds before '
'retry.', self.sleep_timeout)
self._stop_event.wait(self.sleep_timeout)
continue
self._log.debug('Got next task for processing: %s', task)
if self._submit_task(task):
self._log.debug('Successfully submitted task %s for processing',
task)
else:
# Submission was interrupted because application
# has been told to stop. Exit the main loop.
break
self._log.info('%s has been told to stop. Exiting.', self._app_name)
def _submit_task(self, task):
"""Submit task to the pool executor for processing.
:param object task: task item.
:return: True if submission was successful, False if submission was
interrupted because application is about to exit.
:rtype: bool
"""
while not self._stop_event.is_set():
if self._task_limiter.acquire(blocking=False):
try:
task_done_cb = functools.partial(self._task_done, task)
self._task_executor.submit(
self._run_task, task).add_done_callback(task_done_cb)
return True
except Exception as exc:
self._task_limiter.release()
self._log.exception(
'Could not submit task for processing: %s. '
'Sleeping for %s seconds before next try.',
exc, self.sleep_timeout)
else:
self._log.info(
'No free workers. Sleeping for %s seconds before next try.',
self.sleep_timeout)
self._stop_event.wait(self.sleep_timeout)
return False
def _task_done(self, task, future):
"""Called when task is done.
:param object task: task item.
:param concurrent.futures.Future future: future object.
"""
self._task_limiter.release()
self._on_task_done(task, future)
def _stop_task_executor(self):
"""Stop task executor instance.
"""
if self._task_executor:
self._log.info('Stopping task executor')
try:
self._task_executor.shutdown(wait=True)
except Exception as exc:
self._log.exception(
'Exception while trying to stop task executor: %s', exc)
def main():
"""Example application.
"""
class ExampleApp(ConsumerAppBase):
"""Example application.
"""
def _get_next_task(self):
import random
import time
time.sleep(.01)
return random.randint(0, 1000)
def _run_task(self, task):
return task / 2
def _on_task_done(self, task, future):
self._log.info('Task done. Result: %s', future.result())
logging.basicConfig(level=logging.DEBUG)
ExampleApp('example').run()
if __name__ == '__main__':
main()
| [
"betty@qburst.com"
] | betty@qburst.com |
d326165ea00df991a6e4d9d5d98b1c1e5b8f6bd2 | bc54edd6c2aec23ccfe36011bae16eacc1598467 | /simscale_sdk/models/dimensional_partial_vector_function_length.py | c2477fbea0c198de46734af34b5af463ce9ae17f | [
"MIT"
] | permissive | SimScaleGmbH/simscale-python-sdk | 4d9538d5efcadae718f12504fb2c7051bbe4b712 | 6fe410d676bf53df13c461cb0b3504278490a9bb | refs/heads/master | 2023-08-17T03:30:50.891887 | 2023-08-14T08:09:36 | 2023-08-14T08:09:36 | 331,949,105 | 17 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,583 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class DimensionalPartialVectorFunctionLength(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type (consumed by to_dict()).
    openapi_types = {
        'value': 'PartialVectorFunction',
        'unit': 'str'
    }
    # Attribute name -> JSON key in the serialized API payload.
    attribute_map = {
        'value': 'value',
        'unit': 'unit'
    }
    def __init__(self, value=None, unit=None, local_vars_configuration=None):  # noqa: E501
        """DimensionalPartialVectorFunctionLength - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._value = None
        self._unit = None
        self.discriminator = None
        if value is not None:
            self.value = value
        # `unit` is assigned unconditionally so the setter can validate it
        # (None is rejected there when client-side validation is enabled).
        self.unit = unit
    @property
    def value(self):
        """Gets the value of this DimensionalPartialVectorFunctionLength.  # noqa: E501

        :return: The value of this DimensionalPartialVectorFunctionLength.  # noqa: E501
        :rtype: PartialVectorFunction
        """
        return self._value
    @value.setter
    def value(self, value):
        """Sets the value of this DimensionalPartialVectorFunctionLength.

        :param value: The value of this DimensionalPartialVectorFunctionLength.  # noqa: E501
        :type: PartialVectorFunction
        """
        self._value = value
    @property
    def unit(self):
        """Gets the unit of this DimensionalPartialVectorFunctionLength.  # noqa: E501

        :return: The unit of this DimensionalPartialVectorFunctionLength.  # noqa: E501
        :rtype: str
        """
        return self._unit
    @unit.setter
    def unit(self, unit):
        """Sets the unit of this DimensionalPartialVectorFunctionLength.

        Validates (when client-side validation is on) that the unit is
        present and one of the allowed length units.

        :param unit: The unit of this DimensionalPartialVectorFunctionLength.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and unit is None:  # noqa: E501
            raise ValueError("Invalid value for `unit`, must not be `None`")  # noqa: E501
        allowed_values = ["m", "in"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and unit not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `unit` ({0}), must be one of {1}"  # noqa: E501
                .format(unit, allowed_values)
            )
        self._unit = unit
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DimensionalPartialVectorFunctionLength):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, DimensionalPartialVectorFunctionLength):
            return True
        return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
cfcb7eff8db5283ef0e36e4c97e4697cf870f136 | 11b0c124262ac40de87d756389082462d8452e4d | /ml/m17_pca_mnist2.py | 7b6f1c3202c5a500e8458c7c7ee0f21900613c2c | [] | no_license | Hyunwoo29/keras01 | 164b519f9bb70d55f7bfa91c66529cee4d012b24 | 494611a94420f8e268c37085ccf0b0de4aa71048 | refs/heads/main | 2023-08-06T07:19:13.288634 | 2021-10-05T14:03:56 | 2021-10-05T14:03:56 | 383,744,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from tensorflow.keras.datasets import mnist
# Load MNIST: 60000 train / 10000 test grayscale digit images of 28x28.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)  # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)  # (10000, 28, 28) (10000,)
print(x_train[0])
print("y_train = ", y_train[0])  # 5
print(x_train[0].shape)  # (28, 28)
# Flatten each image to a 784-vector and scale pixels into [0, 1]
# (255 is the maximum pixel value).
x_train = x_train.reshape(60000, 28 * 28).astype('float32') / 255.
x_test = x_test.reshape(10000, 28 * 28) / 255.
# One-hot encode the digit labels (0-9 -> 10 columns).
from sklearn.preprocessing import OneHotEncoder
one = OneHotEncoder()
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
one.fit(y_train)
y_train = one.transform(y_train).toarray()
y_test = one.transform(y_test).toarray()
print(y_train.shape)  # (60000, 10)
print(y_test.shape)  # (10000, 10)
# Project the 784 pixel features down to 154 principal components.
# Fit on the training set only, then reuse that projection for the test set.
pca = PCA(n_components=154)
x_train = pca.fit_transform(x_train)
x_test = pca.transform(x_test)
print(x_train.shape)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Simple fully-connected classifier on the PCA features.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(154,)))
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=5, mode='auto')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# BUG FIX: Keras expects `callbacks` to be a *list* of Callback instances;
# a bare callback object is rejected by Model.fit.
model.fit(x_train, y_train, epochs=100, batch_size=120, validation_split=0.2,
          callbacks=[early_stopping])
# 4. evaluate, predict
loss = model.evaluate(x_test, y_test, batch_size=1)
print("loss : ", loss)
y_predict = model.predict(x_test)
y_Mpred = np.argmax(y_predict, axis=-1)
print("y_test : ", y_test[:10])
# BUG FIX: the second print duplicated y_test; show the decoded predictions.
print("y_Mpred : ", y_Mpred[:10])
| [
"nbaksa3@gmail.com"
] | nbaksa3@gmail.com |
4e603cea5edac2fec64ed72506c53373e795cab1 | 8986e7d2bfe18ceccf8b7f3e514a09c069a5ec35 | /websockets/routing.py | d9c08eac6387d7627a3a5a4e4d1ce08dc7e3c9ae | [] | no_license | platzhersh/django-channels-test | 1df3a446d792817ba33c40b95d0d2fd3b818c394 | 506e3fcf31e4b2f54e559ef89fbd19ac924c4837 | refs/heads/master | 2020-04-19T09:30:11.518671 | 2016-09-10T08:19:22 | 2016-09-10T08:19:22 | 67,833,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from channels.routing import route
from websockets.consumers import ws_add, ws_message, ws_disconnect
channel_routing = [
route("websocket.connect", ws_add),
route("websocket.receive", ws_message),
route("websocket.disconnect", ws_disconnect),
] | [
"chregi.glatthard@gmail.com"
] | chregi.glatthard@gmail.com |
56e6f548e9ca2b32de5a717cb7d7cab388416443 | 570e54d08cc2059ced4938bcb259d69a6bb30108 | /app/core/migrations/0002_auto_20210702_1553.py | cb134be0aa52fed599916f2f13b35324bd39db34 | [] | no_license | coconutcake/grp_scheduler | b73e8931435106595276dc8099262620b96cfc58 | 62dea94e4ac8d705b6f13a8694f9a6201e944145 | refs/heads/main | 2023-06-19T03:42:13.869659 | 2021-07-02T14:17:30 | 2021-07-02T14:17:30 | 380,989,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.1.3 on 2021-07-02 15:53
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations. Changes only the model's
    # human-readable (verbose) names, so no database schema change occurs.
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'verbose_name': 'Konto użytkownika', 'verbose_name_plural': 'Konta użytkowników'},
        ),
    ]
| [
"contact@mign.pl"
] | contact@mign.pl |
21fa7945fd42d6dc7995f7bf27efce7e4f5f7a44 | 058f6cf55de8b72a7cdd6e592d40243a91431bde | /tracing_tool/expressions_parser.py | c6cdd587e14cf9b995b38605a04354381ffe6cad | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/FPChecker | 85e8ebf1d321b3208acee7ddfda2d8878a238535 | e665ef0f050316f6bc4dfc64c1f17355403e771b | refs/heads/master | 2023-08-30T23:24:43.749418 | 2022-04-14T19:57:44 | 2022-04-14T19:57:44 | 177,033,795 | 24 | 6 | Apache-2.0 | 2022-09-19T00:09:50 | 2019-03-21T22:34:14 | Python | UTF-8 | Python | false | false | 3,496 | py | #!/usr/bin/env python3
import tempfile
import shutil
import os
import sys
import re
from collections import deque
expressionPattern_EQ = re.compile(r"(.*?)([a-zA-Z0-9_\[\]\s]+)(\+?\=)([a-zA-Z0-9_\.\[\]\(\)\+\-\*\/\n\s]+);[\s]*\n", re.DOTALL)
def representsInt(s):
    """Return True when *s* parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
class ParseExpressions:
    """Rewrite a source file in place, wrapping the right-hand side of simple
    assignment expressions (``lhs = rhs;`` / ``lhs += rhs;``) in a
    ``_FPC_CHECK_MACRO_(rhs, line, file)`` call.

    The file is scanned through a sliding window of ``windowSize`` lines so
    that expressions spanning multiple lines can still be matched.
    """

    # Number of lines kept in the sliding match window.
    windowSize = 10
    # Class-level defaults kept for backward compatibility; working copies
    # are (re)bound per instance in __init__.
    modifiedFile = []
    fileName = ''

    def __init__(self, fileName):
        """Transform *fileName* in place (the file is overwritten)."""
        self.fileName = fileName
        # BUG FIX: bind a fresh list per instance. The class attribute was
        # previously mutated in place, so a second ParseExpressions instance
        # inherited (and re-emitted) the first file's accumulated lines.
        self.modifiedFile = []
        previous_lines = deque(maxlen=self.windowSize)
        line_numbers = deque(maxlen=self.windowSize)
        with open(fileName, 'r') as fd:
            number = 0
            for line in fd:
                number += 1
                previous_lines.append(line)
                line_numbers.append(number)
                if len(previous_lines) == self.windowSize:
                    linesMatched = self.parseWindow(previous_lines, line_numbers)
                    # Drop the lines parseWindow consumed from the window.
                    for i in range(linesMatched):
                        if len(previous_lines) > 0:
                            previous_lines.popleft()
                            line_numbers.popleft()
        # Flush whatever is still buffered in the window.
        self.saveFinalFile(previous_lines)

    def saveFinalFile(self, previous_lines):
        """Write the transformed lines plus the remaining window lines to a
        temp file, then copy it over the original file and delete the temp."""
        tmpFd, tmpFname = tempfile.mkstemp(suffix='.txt', text=True)
        with open(tmpFname, 'w') as f:
            for l in self.modifiedFile:
                f.write(l)
            for l in list(previous_lines):
                f.write(l)
        os.close(tmpFd)
        # Copy tmp file to original file
        shutil.copy2(tmpFname, self.fileName)
        # Remove tmp file
        os.remove(tmpFname)

    def consumedLines(self, win, expression):
        """Return how many window lines are covered up to (and including)
        the end of *expression* within the joined window text."""
        wholeWindow = ''.join(list(win))
        idx = wholeWindow.find(expression)
        lines = wholeWindow[:idx + len(expression) + 1].count('\n')
        return lines

    def matchPattern(self, fullLine):
        """Return the assignment-expression regex match for *fullLine*, or None."""
        foundPattern = expressionPattern_EQ.search(fullLine)
        if foundPattern:
            return foundPattern
        return None

    def RHSIsValid(self, RHS):
        """Reject right-hand sides that should not be instrumented:
        bare integer literals, common keywords/constants, and comments."""
        if (not representsInt(RHS) and
                'default' not in RHS and
                'NULL' not in RHS and 'nullptr' not in RHS and
                'true' not in RHS and 'false' not in RHS and
                '//' not in RHS and '/*' not in RHS and '*/' not in RHS):
            return True
        return False

    def LHSIsValid(self, PRE, LHS):
        """Reject matches whose prefix or left-hand side contains '&'."""
        if ('&' not in PRE + LHS):
            return True
        return False

    def parseWindow(self, win, line_numbers):
        """Try to instrument one assignment inside the current window.

        Returns the number of window lines consumed by the match, or the
        full window size when nothing was instrumented (the untouched window
        text is then appended to the output as-is).
        """
        fullLine = ''.join(list(win))
        foundPattern = self.matchPattern(fullLine)
        if foundPattern:
            block = foundPattern.group(0)
            expression = foundPattern.group(2) + foundPattern.group(3)
            if ('{' not in expression and
                    '}' not in expression and
                    'using' not in expression):
                PRE = foundPattern.group(1)
                LHS = foundPattern.group(2)
                EQS = foundPattern.group(3)  # assignment operator: '=' or '+='
                RHS = foundPattern.group(4)
                if self.RHSIsValid(RHS) and self.LHSIsValid(PRE, LHS):
                    consumed = self.consumedLines(win, block)
                    lineNumber = str(list(line_numbers)[consumed - 1])
                    # Example: double _FPC_CHECK_(double x, int loc, const char *fileName)
                    # Only the last 25 chars of the file name are embedded.
                    newLine = PRE + LHS + EQS + ' _FPC_CHECK_MACRO_(' + RHS + ', ' + lineNumber + ', "' + self.fileName[-25:] + '..."); \n'
                    self.modifiedFile.append(newLine)
                    return consumed
        self.modifiedFile.append(fullLine)
        return self.windowSize
if __name__ == "__main__":
    # Command-line entry point: expects the path of the file to transform
    # (in place) as the first argument.
    fileName = sys.argv[1]
    print('Transforming file:', fileName+'...')
    exp = ParseExpressions(fileName)
| [
"ilaguna@llnl.gov"
] | ilaguna@llnl.gov |
eaec0dbf2a84925649108d199b5ace5e4e65d294 | 63b7671b5296b97aa5271d076540e0d81b85d599 | /strongr/schedulerdomain/model/scalingdrivers/scalingdriver.py | c6ae817802838c9c1fcaecb00a60b1cdde36ad6c | [
"Apache-2.0"
] | permissive | bigr-erasmusmc/StrongR | bd30d7f41c1fef87bd241c0c4ea059f88c5c426e | 48573e170771a251f629f2d13dba7173f010a38c | refs/heads/master | 2021-10-10T10:17:15.344510 | 2018-11-20T10:21:50 | 2018-11-20T10:21:50 | 112,336,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | import dependency_injector.containers as containers
import dependency_injector.providers as providers
import strongr.core
from strongr.schedulerdomain.model.scalingdrivers.nullscaler import NullScaler
from strongr.schedulerdomain.model.scalingdrivers.simplescaler import SimpleScaler
from strongr.schedulerdomain.model.scalingdrivers.surfhpccloudscaler import SurfHpcScaler
class ScalingDriver(containers.DeclarativeContainer):
    """IoC container of service providers."""
    # Registry of available scaling-driver implementations, keyed by the
    # name used in the `schedulerdomain.scalingdriver` config setting.
    _scalingdrivers = providers.Object({
        'simplescaler': SimpleScaler,
        'nullscaler': NullScaler,
        'surfsarahpccloud': SurfHpcScaler
    })
    # Singleton of the driver selected by the config. The driver's own config
    # section (if one exists under schedulerdomain with the driver's name) is
    # passed through as a plain dict; otherwise an empty dict is used.
    # NOTE: this is resolved at import time from the global core config.
    scaling_driver = providers.Singleton(_scalingdrivers()[strongr.core.Core.config().schedulerdomain.scalingdriver.lower()], config=dict(strongr.core.Core.config().schedulerdomain.as_dict()[strongr.core.Core.config().schedulerdomain.scalingdriver]) if strongr.core.Core.config().schedulerdomain.scalingdriver in strongr.core.Core.config().schedulerdomain.as_dict().keys() else {})
| [
"thomas@tphil.nl"
] | thomas@tphil.nl |
56bdf959b9df6e6457d727f24c203fd0f3424027 | 68c3c27fd7d340d0974da28c18c6f72162afd9d7 | /src/pykka/_exceptions.py | 4fff2e52f96c6a60ed417269b435f9cd12ebae89 | [
"Apache-2.0"
] | permissive | bobfang1992/pykka | 22cab79099971fe507d99242e21e4ef033be7b14 | d088d1e70e77cd9bee4a015a8ec8bfcb1e8127dc | refs/heads/main | 2023-08-18T15:42:50.258200 | 2021-10-04T09:56:10 | 2021-10-04T09:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | __all__ = ["ActorDeadError", "Timeout"]
class ActorDeadError(Exception):
    """Raised when an operation targets an actor that is dead or unavailable."""
class Timeout(Exception):
    """Raised when waiting on a future exceeds the allowed time."""
| [
"stein.magnus@jodal.no"
] | stein.magnus@jodal.no |
44e5ab90fbe67fb924dd058c292888808e043964 | f3af946a1b31fa69d25b79284ec3d37a3666524a | /core/forms.py | 9a21ce0f94fb2b3818f62df0ef9dac9b1364b26a | [] | no_license | od-5/distrubutor | ec813b7aa7014434396b80c10e075659889ca4a7 | e2de981144bd4ac4d3ba6a2800368c39bb4d4144 | refs/heads/master | 2020-05-21T13:36:46.157223 | 2017-12-15T14:12:55 | 2017-12-15T14:12:55 | 55,419,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,190 | py | # coding=utf-8
from django import forms
from core.models import User, Setup
__author__ = 'alexy'
class UserAddForm(forms.ModelForm):
    """Registration form for User with password + confirmation fields.

    Validation error messages are in Russian (the UI language).
    """
    class Meta:
        model = User
        fields = ('email', 'last_name', 'first_name', 'patronymic', 'phone')
        # Bootstrap styling for every input.
        widgets = {
            'email': forms.EmailInput(attrs={'class': 'form-control'}),
            'last_name': forms.TextInput(attrs={'class': 'form-control'}),
            'first_name': forms.TextInput(attrs={'class': 'form-control'}),
            'patronymic': forms.TextInput(attrs={'class': 'form-control'}),
            'phone': forms.TextInput(attrs={'class': 'form-control'}),
        }
    password1 = forms.CharField(label=u'Пароль', widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    password2 = forms.CharField(label=u'Повторите пароль', widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    def clean_email(self):
        """Reject e-mail addresses that already belong to a registered user."""
        data = self.cleaned_data
        try:
            User.objects.get(email=data['email'])
        except User.DoesNotExist:
            return data['email']
        # Reached only when the lookup succeeded, i.e. the e-mail is taken.
        raise forms.ValidationError(u'Пользователь с таким e-mail уже зарегистрирован')
    def clean_password2(self):
        """Ensure the password confirmation matches the password."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(u'Пароль и подтверждение пароля не совпадают!')
        return password2
    def save(self, commit=True):
        """Create the user, storing the password properly hashed."""
        user = super(UserAddForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserUpdateForm(forms.ModelForm):
    """Profile edit form; also exposes the account's ``is_active`` flag."""
    class Meta:
        model = User
        fields = ('email', 'last_name', 'first_name', 'patronymic', 'phone', 'is_active',)
        # Bootstrap styling for every input.
        widgets = {
            'email': forms.TextInput(attrs={'class': 'form-control'}),
            'last_name': forms.TextInput(attrs={'class': 'form-control'}),
            'first_name': forms.TextInput(attrs={'class': 'form-control'}),
            'patronymic': forms.TextInput(attrs={'class': 'form-control'}),
            'phone': forms.TextInput(attrs={'class': 'form-control'}),
            'is_active': forms.CheckboxInput(),
        }
class SetupForm(forms.ModelForm):
    """Site-wide settings form covering every field of the Setup model."""
    class Meta:
        model = Setup
        fields = '__all__'
        # Bootstrap styling for every input.
        widgets = {
            'meta_title': forms.TextInput(attrs={'class': 'form-control'}),
            'meta_keys': forms.Textarea(attrs={'class': 'form-control'}),
            'meta_desc': forms.Textarea(attrs={'class': 'form-control'}),
            'email': forms.EmailInput(attrs={'class': 'form-control'}),
            'phone': forms.TextInput(attrs={'class': 'form-control'}),
            'video': forms.Textarea(attrs={'class': 'form-control'}),
            'top_js': forms.Textarea(attrs={'class': 'form-control'}),
            'bottom_js': forms.Textarea(attrs={'class': 'form-control'}),
            'robots_txt': forms.Textarea(attrs={'class': 'form-control'}),
        }
| [
"od-5@yandex.ru"
] | od-5@yandex.ru |
7bc9dcb6568777ec2825b64d06718f69760d2771 | 609e0369b176638f55f0f59342dfe51120e5e070 | /structure/score.py | 17b41cad80492ac05c5d8b365830db33783c42b8 | [
"MIT"
] | permissive | dpazel/music_rep | fd8d2691fca716a06db1fa60b0fd9b377b40bf76 | c2aa0496a02a2785c9736a542e259ce4bdcc7a35 | refs/heads/master | 2022-12-07T12:33:28.774646 | 2022-12-04T15:45:57 | 2022-12-04T15:45:57 | 81,465,598 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,174 | py | """
File: score.py
Purpose: Class representing a score, consisting of a number of instrument voices. It also retains event
sequences for tempo and time, which are global over all the voices.
"""
from timemodel.tempo_event_sequence import TempoEventSequence
from timemodel.time_signature_event_sequence import TimeSignatureEventSequence
from timemodel.time_conversion import TimeConversion
from timemodel.duration import Duration
from timemodel.position import Position
from misc.interval import Interval
from structure.instrument_voice import InstrumentVoice
from harmoniccontext.harmonic_context_track import HarmonicContextTrack
class Score(object):
    """
    Class representing a score, consisting of a number of instrument voices. It also retains event
    sequences for tempo and time, which are global over all the voices.
    """

    def __init__(self):
        """
        Constructor.
        """
        self.__instrument_voices = list()
        # map from instrument class to the instrument voices added.
        self.class_map = dict()
        # map upper-cased instrument class name to InstrumentClass
        self.name_class_map = dict()
        # HCT not utilized, TODO
        self.__hct = HarmonicContextTrack()
        self.__tempo_sequence = TempoEventSequence()
        self.__time_signature_sequence = TimeSignatureEventSequence()

    @property
    def tempo_sequence(self):
        """Tempo event sequence, global over all voices."""
        return self.__tempo_sequence

    @property
    def time_signature_sequence(self):
        """Time signature event sequence, global over all voices."""
        return self.__time_signature_sequence

    @property
    def hct(self):
        """Harmonic context track of the score."""
        return self.__hct

    def _make_conversion(self):
        """Build a TimeConversion spanning the full score.

        Shared by the time-unit conversion methods below (previously each
        constructed an identical TimeConversion inline).
        """
        return TimeConversion(self.tempo_sequence, self.time_signature_sequence,
                              Position(self.duration.duration))

    def add_instrument_voice(self, instrument_voice):
        """Add an InstrumentVoice to the score, indexing it by its instrument class.

        :param instrument_voice: InstrumentVoice to add.
        :raises Exception: if the argument is not an InstrumentVoice.
        """
        if not isinstance(instrument_voice, InstrumentVoice):
            raise Exception('parameter must be InstrumentVoice type not {0}'.format(type(instrument_voice)))
        instrument_family = instrument_voice.instrument.parent
        instrument_class = instrument_family.parent
        # add the name to class map if not already there.
        if instrument_class.name.upper() not in self.name_class_map:
            self.name_class_map[instrument_class.name.upper()] = instrument_class
        # Add the map key to class_map if not there,
        # then append the given instrument voice to the map target.
        if instrument_class not in self.class_map:
            self.class_map[instrument_class] = []
        self.class_map[instrument_class].append(instrument_voice)
        # Add the instrument voice to the general list.
        self.__instrument_voices.append(instrument_voice)

    def get_class_instrument_voices(self, class_name):
        """Return a copy of the voices registered under the named instrument class
        ([] when the class is unknown).

        BUG FIX: class_map is keyed by InstrumentClass objects, not names, so
        the name must first be resolved through name_class_map. The original
        indexed class_map with the upper-cased name, raising KeyError for
        every known class.
        """
        key = class_name.upper()
        if key not in self.name_class_map:
            return []
        return list(self.class_map[self.name_class_map[key]])

    @property
    def instrument_voices(self):
        """A copy of the list of all instrument voices in the score."""
        return list(self.__instrument_voices)

    @property
    def instrument_classes(self):
        """All InstrumentClass objects that currently have voices in the score."""
        return [k for (k, _) in self.class_map.items()]

    def get_instrument_voice(self, instrument_name):
        """Return all voices whose instrument name matches (case-insensitive)."""
        answer = []
        for inst_voice in self.__instrument_voices:
            if inst_voice.instrument.name.upper() == instrument_name.upper():
                answer.append(inst_voice)
        return answer

    def remove_instrument_voice(self, instrument_voice):
        """Remove a voice, cleaning up the class maps when its class becomes empty.

        :raises Exception: if the voice is not part of the score.
        """
        if instrument_voice not in self.__instrument_voices:
            raise Exception('Attempt to remove voice {0} which does not exist'.format(
                instrument_voice.instrument.name))
        self.__instrument_voices.remove(instrument_voice)
        class_name = instrument_voice.instrument.parent.parent.name
        class_object = self.name_class_map[class_name.upper()]
        class_list = self.class_map[class_object]
        class_list.remove(instrument_voice)
        if len(class_list) == 0:
            self.name_class_map.pop(class_name.upper())
            self.class_map.pop(class_object)

    def remove_instrument_class(self, class_name):
        """Remove all instrument voices belonging to the named instrument class.

        BUG FIX: the original only validated the name and never removed
        anything. Each voice of the class is now removed through
        remove_instrument_voice, which also clears the class from
        class_map/name_class_map once empty.

        :raises Exception: if the class has no voices in the score.
        """
        if class_name.upper() not in self.name_class_map:
            raise Exception('Attempt to remove class voices {0} which do not exist'.format(class_name))
        class_object = self.name_class_map[class_name.upper()]
        # Iterate a copy: remove_instrument_voice mutates the underlying list.
        for voice in list(self.class_map[class_object]):
            self.remove_instrument_voice(voice)

    @property
    def duration(self):
        """Score duration in whole note time (longest voice)."""
        return self.length()

    def length(self):
        """Return the longest voice duration; Duration(0) for an empty score."""
        duration = Duration(0)
        for voice in self.__instrument_voices:
            duration = voice.duration if voice.duration > duration else duration
        return duration

    def real_time_duration(self):
        """Return the score duration converted to actual (real) time."""
        interval = Interval(0, self.duration)
        conversion = self._make_conversion()
        return conversion.position_to_actual_time(interval.upper)

    def get_notes_by_wnt_interval(self, interval):
        """
        Get all the notes in the score by whole-note-time interval. Return dict structure as follows:
        instrument_voice --> {voice_index --> [notes]}
        """
        answer = {}
        for instrument_voice in self.__instrument_voices:
            answer[instrument_voice] = instrument_voice.get_notes_by_interval(interval)
        return answer

    def get_notes_by_rt_interval(self, interval):
        """Like get_notes_by_wnt_interval, with the interval given in real time."""
        conversion = self._make_conversion()
        wnt_interval = Interval(conversion.actual_time_to_position(interval.lower),
                                conversion.actual_time_to_position(interval.upper))
        return self.get_notes_by_wnt_interval(wnt_interval)

    def get_notes_by_bp_interval(self, interval):
        """Like get_notes_by_wnt_interval, with the interval given in beat position."""
        conversion = self._make_conversion()
        wnt_interval = Interval(conversion.bp_to_position(interval.lower), conversion.bp_to_position(interval.upper))
        return self.get_notes_by_wnt_interval(wnt_interval)

    def get_notes_starting_in_wnt_interval(self, interval):
        """
        Get all the notes starting in the score by whole note time interval: Return dict structure as follows:
        instrument_voice --> {voice_index --> [notes]}
        """
        answer = {}
        for instrument_voice in self.__instrument_voices:
            answer[instrument_voice] = instrument_voice.get_notes_starting_in_interval(interval)
        return answer

    def get_notes_starting_in_rt_interval(self, interval):
        """
        Get all notes starting in the score by an interval based on real time: Return dict structure as follows:
        instrument_voice --> {voice_index --> [notes]}
        """
        conversion = self._make_conversion()
        wnt_interval = Interval(conversion.actual_time_to_position(interval.lower),
                                conversion.actual_time_to_position(interval.upper))
        return self.get_notes_starting_in_wnt_interval(wnt_interval)

    def get_notes_starting_in_bp_interval(self, interval):
        """
        Get all notes starting in the score by an interval based on beat position: Return dict structure as follows:
        instrument_voice --> {voice_index --> [notes]}
        """
        conversion = self._make_conversion()
        wnt_interval = Interval(conversion.bp_to_position(interval.lower), conversion.bp_to_position(interval.upper))
        return self.get_notes_starting_in_wnt_interval(wnt_interval)

    @property
    def beat_duration(self):
        """Score duration expressed as a beat position."""
        duration = self.duration
        conversion = self._make_conversion()
        return conversion.position_to_bp(Position(duration.duration))

    @property
    def real_duration(self):
        """Score duration expressed in actual (real) time."""
        duration = self.duration
        conversion = self._make_conversion()
        return conversion.position_to_actual_time(Position(duration.duration))
| [
"dpazel@optonline.net"
] | dpazel@optonline.net |
68e72358f866f6e05969f6c92c7aabd20de93eba | 88450311cfc5611e1dbfa1886c9fd027d6ff04a4 | /mkt/receipts/utils.py | 087e1134e158ce9b123946045d4506685d3f1722 | [] | no_license | rtilder/zamboni | 0fa713b4a989eb5408fbd6717428e026ae5d71fb | 9902f058f10a517b15c66b5dfe0f0d9fb3c9a34b | refs/heads/master | 2021-01-17T21:54:48.574071 | 2013-04-29T18:26:40 | 2013-04-29T18:26:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,129 | py | import calendar
import time
from urllib import urlencode
from django.conf import settings
import jwt
from nose.tools import nottest
from access import acl
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import memoize
from lib.crypto.receipt import sign
from mkt.webapps.models import Installed
@memoize(prefix='create-receipt', time=60 * 10)
def create_receipt(installed_pk, flavour=None):
    """Build and sign a Web Apps purchase receipt (a signed JWT).

    :param installed_pk: pk of the Installed row linking a user to a webapp.
    :param flavour: None for a normal purchase receipt, or 'developer' /
        'reviewer' for short-lived receipts verified against the local
        verifier URL instead of the production one.
    :raises ValueError: when a flavoured receipt is requested for a user who
        is neither a reviewer nor an author of the app.

    Results are memoized for 10 minutes per (installed_pk, flavour).
    """
    assert flavour in [None, 'developer', 'reviewer'], (
        'Invalid flavour: %s' % flavour)
    installed = Installed.objects.get(pk=installed_pk)
    webapp = installed.addon
    # Packaged apps are served from the site itself; hosted apps from their origin.
    origin = (settings.SITE_URL if webapp.is_packaged else webapp.origin)
    time_ = calendar.timegm(time.gmtime())
    typ = 'purchase-receipt'
    product = {'url': origin, 'storedata': urlencode({'id': int(webapp.pk)})}
    # Generate different receipts for reviewers or developers.
    expiry = time_ + settings.WEBAPPS_RECEIPT_EXPIRY_SECONDS
    if flavour:
        if not (acl.action_allowed_user(installed.user, 'Apps', 'Review') or
                webapp.has_author(installed.user)):
            raise ValueError('User %s is not a reviewer or developer' %
                             installed.user.pk)
        # Developer and reviewer receipts should expire after 24 hours.
        expiry = time_ + (60 * 60 * 24)
        typ = flavour + '-receipt'
        verify = absolutify(reverse('receipt.verify', args=[webapp.guid]))
    else:
        verify = settings.WEBAPPS_RECEIPT_URL
    detail = reverse('account.purchases.receipt', args=[webapp.pk])
    reissue = webapp.get_purchase_url('reissue')
    # Receipt payload follows the Web Apps receipt JWT layout.
    receipt = dict(detail=absolutify(detail), exp=expiry, iat=time_,
                   iss=settings.SITE_URL, nbf=time_, product=product,
                   reissue=absolutify(reissue), typ=typ,
                   user={'type': 'directed-identifier',
                         'value': installed.uuid},
                   verify=verify)
    if settings.SIGNING_SERVER_ACTIVE:
        # The shiny new code: sign via the remote signing server.
        return sign(receipt)
    else:
        # Our old bad code: sign locally with the RSA key from get_key().
        return jwt.encode(receipt, get_key(), u'RS512')
@nottest
def create_test_receipt(root, status):
    """Build and sign a 24-hour 'test-receipt' JWT for receipt testing.

    :param root: origin URL to embed as the product url.
    :param status: outcome the test verifier endpoint should report; embedded
        in the `verify` URL.
    (The @nottest decorator only stops nose from collecting this as a test.)
    """
    time_ = calendar.timegm(time.gmtime())
    detail = absolutify(reverse('receipt.test.details'))
    receipt = {
        'detail': absolutify(detail),
        'exp': time_ + (60 * 60 * 24),
        'iat': time_,
        'iss': settings.SITE_URL,
        'nbf': time_,
        'product': {
            'storedata': urlencode({'id': 0}),
            'url': root,
        },
        'reissue': detail,
        'typ': 'test-receipt',
        'user': {
            'type': 'directed-identifier',
            'value': 'none'
        },
        'verify': absolutify(reverse('receipt.test.verify',
                                     kwargs={'status': status}))
    }
    # Same signing split as create_receipt: remote server vs. local RSA key.
    if settings.SIGNING_SERVER_ACTIVE:
        return sign(receipt)
    else:
        return jwt.encode(receipt, get_key(), u'RS512')
def get_key():
    """Load the RSA key (path from settings.WEBAPPS_RECEIPT_KEY) used with jwt.encode."""
    return jwt.rsa_load(settings.WEBAPPS_RECEIPT_KEY)
| [
"amckay@mozilla.com"
] | amckay@mozilla.com |
a76fb56e6a4cce926ef9f14d1139d4765399f8e4 | cf3e91a574dd1520234823f895757e6bb406928c | /백준/1168.py | c1538773a311cc5e4f7b803cd7d25b2ce275e79a | [] | no_license | MelRanG/Algorithm | 92a5c6c4d6b89d72cf8e0ea621544bc4ed963222 | f176028855bca18e0a079d509fe2186b92b4a300 | refs/heads/master | 2023-04-04T10:23:18.963508 | 2021-04-20T14:32:17 | 2021-04-20T14:32:17 | 330,335,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | #요세푸스 문제2
import sys
from collections import deque
# Josephus problem (BOJ 1168): n people stand in a circle and every k-th
# person is removed in turn; print the removal order as "<p1, p2, ..., pn>".
n, k = map(int, sys.stdin.readline().split())
circle = deque(range(1, n + 1))
order = []
while circle:
    # Bring the k-th person to the front, then remove them.
    circle.rotate(1 - k)
    order.append(str(circle.popleft()))
sys.stdout.write('<%s>' % ', '.join(order))
| [
"tpdud285@gmail.com"
] | tpdud285@gmail.com |
6dface476f5a76e007c3b135a37cdf8c71db7f2d | af35f890c0c6a2fa531f47a4c2ed132e8920190d | /python/leetcode/string/804_unique_morse_code.py | bec046830f755e84d728d04867f4bce3b4a67318 | [] | no_license | Levintsky/topcoder | b1b17cd3fddef5a23297bcbe4e165508d09a655d | a5cb862f0c5a3cfd21468141800568c2dedded0a | refs/heads/master | 2021-06-23T10:15:27.839199 | 2021-02-01T07:49:48 | 2021-02-01T07:49:48 | 188,175,357 | 0 | 1 | null | 2020-05-19T09:25:12 | 2019-05-23T06:33:38 | C | UTF-8 | Python | false | false | 1,758 | py | """
804. Unique Morse Code Words (Easy)
International Morse Code defines a standard encoding where each letter is mapped to a series of dots and dashes, as follows: "a" maps to ".-", "b" maps to "-...", "c" maps to "-.-.", and so on.
For convenience, the full table for the 26 letters of the English alphabet is given below:
[".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
Now, given a list of words, each word can be written as a concatenation of the Morse code of each letter. For example, "cba" can be written as "-.-..--...", (which is the concatenation "-.-." + "-..." + ".-"). We'll call such a concatenation, the transformation of a word.
Return the number of different transformations among all words we have.
Example:
Input: words = ["gin", "zen", "gig", "msg"]
Output: 2
Explanation:
The transformation of each word is:
"gin" -> "--...-."
"zen" -> "--...-."
"gig" -> "--...--."
"msg" -> "--...--."
There are 2 different transformations, "--...-." and "--...--.".
Note:
The length of words will be at most 100.
Each words[i] will have length in range [1, 12].
words[i] will only consist of lowercase letters.
"""
class Solution(object):
    """LeetCode 804 - count the distinct Morse transformations of a word list."""

    # Morse encodings for 'a'..'z', in alphabetical order.
    _MORSE = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
              ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
              "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."]

    def uniqueMorseRepresentations(self, words):
        """
        :type words: List[str]
        :rtype: int
        """
        transformations = {
            "".join(self._MORSE[ord(ch) - ord('a')] for ch in word)
            for word in words
        }
        return len(transformations)
| [
"zhuoyuanchen2014@u.northwestern.edu"
] | zhuoyuanchen2014@u.northwestern.edu |
e19175e49bbfbc085742c2f086426a3c97c7596f | 048df2b4dc5ad153a36afad33831017800b9b9c7 | /yukicoder/yuki_0504.py | ab4d9b8dcab7e3b4491460c8fbc8b6f077cb3fc8 | [] | no_license | fluffyowl/past-submissions | a73e8f5157c647634668c200cd977f4428c6ac7d | 24706da1f79e5595b2f9f2583c736135ea055eb7 | refs/heads/master | 2022-02-21T06:32:43.156817 | 2019-09-16T00:17:50 | 2019-09-16T00:17:50 | 71,639,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | n = input()
# NOTE: Python 2 code (print statement, raw_input, xrange). The line reading
# `n` precedes this fragment.
a = input()
ans = 1
print 1
# NOTE(review): `a` is never updated inside the loop, so every subsequent
# value is compared against the *first* value only, and `ans` is printed only
# when the comparison succeeds -- confirm against the yukicoder 504 statement.
for i in xrange(n-1):
    b = int(raw_input())
    if (b > a):
        ans += 1
        print ans
| [
"nebukuro09@gmail.com"
] | nebukuro09@gmail.com |
a235fc1b62015aa0984d2b3f060f5c83498e3503 | 1860aa3e5c0ba832d6dd12bb9af43a9f7092378d | /deamo/config/fileparser.py | cc6072513a27f137af5959d44472065dd97e9db8 | [] | no_license | agz1990/GitPython | d90de16451fab9222851af790b67bcccdf35ab75 | 951be21fbf8477bad7d62423b72c3bc87154357b | refs/heads/master | 2020-08-06T18:12:26.459541 | 2015-07-05T14:58:57 | 2015-07-05T14:58:57 | 12,617,111 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | '''
Created on 2013年10月15日
@author: hp41
'''
import configparser
# Build a sample INI configuration and write it to example.ini.
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '42',
                     'Compression': 'yes', 'CompressionLevel': '9'}
config['bitbucket.org'] = {}
# BUG FIX: the chained assignment `topsecret = config[...] = {}` bound
# `topsecret` to the plain dict literal, not to the parser's section proxy,
# so the Port/ForwardX11 values below never reached the parser (and were
# missing from example.ini). Create the section first, then fetch its proxy.
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
config['bitbucket.org']['User'] = 'hg'
topsecret['Port'] = '50022'  # mutates the parser
topsecret['ForwardX11'] = 'no'  # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
    config.write(configfile)
if __name__ == '__main__':
    pass
| [
"522360568@qq.com"
] | 522360568@qq.com |
b0fba071c0c594035ceb956a40802ab57e637beb | 5d573c2e3a90e8c4e3b884d54aacd4a44d5d0b87 | /Python/python_stack/Django/Surprise/apps/surprise_me/urls.py | 3ead6b497739f0f7c0b1c55b4e0ac3ebfa046571 | [] | no_license | RubenDuran/DojoAssignments | 42d12088feabee09eb1874da010e594b0eb7da18 | aa691ae2c41a35f632fa082fbf2eae60ea1f4601 | refs/heads/master | 2021-01-19T20:57:31.962140 | 2018-02-13T01:40:07 | 2018-02-13T01:40:07 | 88,580,713 | 0 | 2 | null | 2017-06-07T22:01:32 | 2017-04-18T04:14:30 | Python | UTF-8 | Python | false | false | 178 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^results$', views.results),
url(r'^surprise$', views.surprise),
]
| [
"rd@rubenduran.net"
] | rd@rubenduran.net |
31c7d48b7b16bfc4760da7e6e80aff4e4c198726 | 960b3a17a4011264a001304e64bfb76d669b8ac5 | /mstrio/datasources/__init__.py | ca1fc5437ab3a130e763f4eecb7d4150f286f12b | [
"Apache-2.0"
] | permissive | MicroStrategy/mstrio-py | 012d55df782a56dab3a32e0217b9cbfd0b59b8dd | c6cea33b15bcd876ded4de25138b3f5e5165cd6d | refs/heads/master | 2023-08-08T17:12:07.714614 | 2023-08-03T12:30:11 | 2023-08-03T12:30:11 | 138,627,591 | 84 | 60 | Apache-2.0 | 2023-07-31T06:43:33 | 2018-06-25T17:23:55 | Python | UTF-8 | Python | false | false | 751 | py | # flake8: noqa
# isort: off
from .dbms import Dbms, list_available_dbms
# isort: on
from .database_connections import DatabaseConnections
from .datasource_connection import (
CharEncoding,
DatasourceConnection,
DriverType,
ExecutionMode,
list_datasource_connections,
)
from .datasource_instance import (
DatasourceInstance,
DatasourceType,
list_connected_datasource_instances,
list_datasource_instances,
)
from .datasource_login import DatasourceLogin, list_datasource_logins
from .datasource_map import (
DatasourceMap,
Locale,
list_datasource_mappings,
list_locales,
)
from .driver import Driver, list_drivers
from .gateway import Gateway, list_gateways
from .helpers import DBType, GatewayType
| [
"noreply@github.com"
] | MicroStrategy.noreply@github.com |
43d700f08bdbed97d6d9656ae0c768f093b1f0c2 | cf5077d06c5145d93b44c0c00bb93f93fbf4d59d | /voting/tests/models.py | 709becead9a664051da3b77239a7a7fb029d603b | [] | no_license | su-danny/famdates | 16a9ee01d259c9978278415943d918fd47bdfc9e | 301cf997985172c146d917c832390e0db57c03c5 | refs/heads/master | 2016-08-06T18:17:30.345319 | 2014-03-11T10:34:31 | 2014-03-11T10:34:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from django.db import models
class Item(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
| [
"su-tn@simpleunion.com"
] | su-tn@simpleunion.com |
a841b1502b4b8dd077cd385784a01e149ccfe3ee | 5d0e76e3c741adc120ce753bacda1e723550f7ac | /278. First Bad Version.py | d6206a9c6830e43b38b82fcd5994b41d051a9627 | [] | no_license | GoldF15h/LeetCode | d8d9d5dedca3cce59f068b94e2edf986424efdbf | 56fcbede20e12473eaf09c9d170c86fdfefe7f87 | refs/heads/main | 2023-08-25T12:31:08.436640 | 2021-10-20T04:36:23 | 2021-10-20T04:36:23 | 392,336,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | def isBadVersion(n) :
global arr
return arr[n-1]
def sol (n):
cur = 1
if isBadVersion(1) == True :
return 1
if isBadVersion(2) == True :
return 2
while not( isBadVersion(cur) == False and isBadVersion(cur+1) == True ) :
jump = 1
print(isBadVersion(cur),isBadVersion(cur+1))
while cur + jump <= n and isBadVersion(cur + jump) == False :
# print(cur,'-> ',end='')
cur += jump
jump *= 2
# print(cur)
# print('reset')
return cur + 1
if __name__ == "__main__" :
n = int(input())
x = int(input())
arr = [False]*(x-1) + [True]*(n-x+1)
print(sol(n))
# isBadVersion()
| [
"todsapon.singsunjit@gmail.com"
] | todsapon.singsunjit@gmail.com |
2e56d44f3e181c000f34199051c05cd59681526d | 0aa834d73b467a2a59647a0ad9ea67ffe8262f7c | /telethon/tl/tl_message.py | bcb4827904824afcad00114cb583d6ed82e80272 | [
"MIT"
] | permissive | ncovercash/Telethon | f0d8a397d99c700f32938b1e3a5c152da7b93351 | d4d47d40e9d25d18380f68cee0af7c76756bffff | refs/heads/master | 2023-04-23T12:01:28.656424 | 2018-01-12T03:18:32 | 2018-01-12T03:18:32 | 116,685,436 | 1 | 0 | MIT | 2023-03-24T13:27:55 | 2018-01-08T14:15:55 | Python | UTF-8 | Python | false | false | 985 | py | import struct
from . import TLObject, GzipPacked
class TLMessage(TLObject):
"""https://core.telegram.org/mtproto/service_messages#simple-container"""
def __init__(self, session, request):
super().__init__()
del self.content_related
self.msg_id = session.get_new_msg_id()
self.seq_no = session.generate_sequence(request.content_related)
self.request = request
self.container_msg_id = None
def to_dict(self, recursive=True):
return {
'msg_id': self.msg_id,
'seq_no': self.seq_no,
'request': self.request,
'container_msg_id': self.container_msg_id,
}
def __bytes__(self):
body = GzipPacked.gzip_if_smaller(self.request)
return struct.pack('<qii', self.msg_id, self.seq_no, len(body)) + body
def __str__(self):
return TLObject.pretty_format(self)
def stringify(self):
return TLObject.pretty_format(self, indent=0)
| [
"totufals@hotmail.com"
] | totufals@hotmail.com |
46327d57adc9fbf8038cb28e5614cc1b58533ac4 | 4e546e9396abe034cd09841bcf2bba80abfc2c80 | /baseq/snv/vcf/GATK.py | 5fa59ea33dc5350b41d559974b4a704877e599e9 | [
"MIT"
] | permissive | basedata10/baseq | 3bab9c99ba370851eed0c09931da0f6bd9750fba | 0f1786c3392a51a6ec7cb0f32355cd28eaa5df29 | refs/heads/master | 2020-03-17T21:51:55.280875 | 2018-05-17T15:04:34 | 2018-05-17T15:04:34 | 133,978,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | import pandas as pd
import re
import numpy as np
def vcf_stats(sample, vcfpath, min_depth=50):
"""
Stats on the VCF from GATK
::
vcf_stats("sample1", "path/to/vcf", min_depth=30)
Return:
A dict/json containing:
Samplename/counts/mean_depth/GT_01/GT_11/MAF
MAF is minor allel frequency.
"""
print("[info] {} {}".format(sample, vcfpath))
df = pd.read_table(vcfpath, comment="#", header=None,
names=['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample'])
infos = df['Sample'].tolist()
info_splits = [re.split(":", x) for x in infos]
df['DP'] = [int(x[2]) for x in info_splits]
df['GT'] = [x[0] for x in info_splits]
df['GQ'] = [int(x[2]) for x in info_splits]
df['MAF'] = [round(min(z) / sum(z), 3) for z in [[int(y) for y in x[1].split(",")] for x in info_splits]]
df = df.loc[df['DP']>=min_depth, :]
df_mutation = pd.DataFrame(data=0, index=["A", "T", "C", "G"], columns=["A", "T", "C", "G"])
for idx, row in df.iterrows():
if row['REF'] in ["A", "T", "C", "G"] and row["ALT"] in ["A", "T", "C", "G"]:
df_mutation.loc[row['REF'], row['ALT']] += 1
MAF, bin_edges = np.histogram(df['MAF'], bins=50, range=(0, 0.5), normed=True)
MAF = np.round(MAF, 3)
print(MAF)
stats = {
"sample": sample,
"counts": len(df['DP']),
"mean_depth": round(sum(df['DP']) / len(df['DP']), 1),
"GT_01": sum([1 for x in df['GT'] if x == "0/1"]),
"GT_11": sum([1 for x in df['GT'] if x == "1/1"]),
"MAF": MAF
}
return stats | [
"friedpine@gmail.com"
] | friedpine@gmail.com |
9b7cbe5073c6ba271712751e2795ef601aacbe74 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/duplicate_20200619164900.py | 1ca110a999ad61d00ff933f0c610bd8564f47235 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def duplicate(str):
# brute force approach
# is storing the substring and checking how many times they
# occur
# dictionary key is the substring and occurrences is how many
# times its occurred
words ={}
# pointers
left = 0
right = 0
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
25f0ba1ca567a06171b7f6720e5dc85cdb6bfbeb | dfd942e65c5d574cde590eee1b309d06269050c8 | /remainder | d7f67ef55090c5f131b4bc41bdf9526251e4489e | [] | no_license | pfuntner/fun | 4ab552569b72806dad4b6b61f02270b63869c931 | f27fde6a55b88ad07d27c32f4465ff416bde57d1 | refs/heads/master | 2023-07-08T09:44:50.575213 | 2023-07-04T11:03:20 | 2023-07-04T11:03:20 | 168,697,624 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | #! /usr/bin/env python2
import logging
import argparse
parser = argparse.ArgumentParser(description='Try out argparse.REMAINDER')
parser.add_argument('args', metavar='arg', nargs=argparse.REMAINDER, help='Zero or more arguments')
parser.add_argument('-v', '--verbose', action='count', help='Enable debugging')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
# log.setLevel(logging.DEBUG if args.verbose else logging.WARNING)
log.setLevel(logging.WARNING - (args.verbose or 0)*10)
print args
| [
"jpfuntne@cisco.com"
] | jpfuntne@cisco.com | |
ac0a5eae97ceb585f7f54a6be1a7369af54d9a8c | e02beeb7ed978131e3036076134d8f7217b1b1cd | /build/movo_desktop/movo_viz/catkin_generated/pkg.installspace.context.pc.py | 83cadceec3ca460d69a2d840740ce02c3b14c259 | [] | no_license | liuyanqi/cs2951k | 0a48926f4ab9d80f1b1fce483d342d10a247fecd | ebed208e99bff973dc3dab3b2b11d0dcfefa25f2 | refs/heads/master | 2021-04-26T22:52:17.939596 | 2018-05-04T03:21:31 | 2018-05-04T03:21:31 | 124,158,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "movo_viz"
PROJECT_SPACE_DIR = "/home/andrewandjasmine/movo_ws/install"
PROJECT_VERSION = "1.0.0"
| [
"liuyanqi@umich.edu"
] | liuyanqi@umich.edu |
2c0e6a399b8114eba9fdb025008bc0d8fbc85ab2 | a7c66b4fa56ba7444c693a7a98ad88e827e3e437 | /zEtc/AmazonOAIII/06_largestItemAssociation.py | 400872b036d56ab96e0d7107acd4ca046e674b7f | [] | no_license | Kohdz/Algorithms | b6ff839a4bf12d0347892e46c6987677948d2d72 | 5f71ba34f7198841fefaa68eee5b95f2f989296b | refs/heads/master | 2021-07-01T01:24:10.653414 | 2021-01-04T14:44:01 | 2021-01-04T14:44:01 | 211,135,595 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # Largest Item Association
# finding largest connected component and sorting it
# https://leetcode.com/problems/accounts-merge/
#
from collections import deque, defaultdict
def largest_item_association(item_association):
item_map = defaultdict(set)
for item_pair in item_association:
item_map[item_pair[0]].add(item_pair[1])
item_map[item_pair[1]].add(item_pair[0])
largest_group = []
visited = set()
for key in item_map.keys():
if key not in visited:
curr_group = []
q = deque()
q.append(key)
while q:
curr = q.popleft()
visited.add(curr)
curr_group.append(curr)
for neighbor in item_map[curr]:
if neighbor not in visited:
q.append(neighbor)
if len(curr_group) > len(largest_group):
largest_group = curr_group
elif len(curr_group) == len(largest_group):
if largest_group > curr_group:
largest_group = curr_group
largest_group.sort()
return largest_group
print(largest_item_association([['item1', 'item2'], ['item3', 'item4'], ['item4', 'item5'], ['item5', 'item6']]))
print(largest_item_association([['item6', 'item7'], ['item3', 'item4'], ['item4', 'item5'], ['item7', 'item8']]))
print(largest_item_association([['item1', 'item2'], ['item4', 'item5'], ['item3', 'item4'], ["item1","item4"]])) | [
"umair.stem@gmail.com"
] | umair.stem@gmail.com |
04d41267ebcd23d33b1ce86271d4e6c38f8fa272 | 437e905d8c214dc25c559b1dc03eaf9f0c85326f | /is28/paranina28/zd2/zdd17.py | 0926aca95e954961e4207bfbd7ab67b67be615c9 | [] | no_license | AnatolyDomrachev/karantin | 542ca22c275e39ef3491b1c0d9838e922423b5a9 | 0d9f60207e80305eb713fd43774e911fdbb9fbad | refs/heads/master | 2021-03-29T03:42:43.954727 | 2020-05-27T13:24:36 | 2020-05-27T13:24:36 | 247,916,390 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | a=[]
b=[]
m=4
n=4
for i in range(m):
a.append([])
for j in range(n):
a[i].append(int(input()))
print(a)
for l in range(n):
b.append(int(input()))
print(b)
j=int(input("введите j="))
for k in range(m):
a[k][j]=b[k]
print(a)
| [
"you@example.com"
] | you@example.com |
65defdb781284c8ba5ed4f9671e740facff58429 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /0801-0900/0835-Image Overlap/0835-Image Overlap.py | f52b6b52f0f13a4389977451c3f8715b70bca999 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 403 | py | import collections
class Solution:
def largestOverlap(self, A: List[List[int]], B: List[List[int]]) -> int:
A1 = [i * len(A) * 2 + j for i in range(len(A)) for j in range(len(A)) if A[i][j]]
B1 = [i * len(A) * 2 + j for i in range(len(A)) for j in range(len(A)) if B[i][j]]
table = collections.Counter(i - j for i in A1 for j in B1)
return max(table.values() or [0])
| [
"jiadaizhao@gmail.com"
] | jiadaizhao@gmail.com |
5f771b6108324d52dd346cbcd9006a4b52a653ba | 74f1bb2c16870e140c33dc94f828f58ce042ff57 | /octopus/lib/gravatar.py | df9340af80f4471a7b6528da095567ae9dbbc322 | [
"Apache-2.0"
] | permissive | richard-jones/magnificent-octopus | 1c4fee308732920b5521be7c998890fd23d48aa2 | 4a65f5a3e919af61887302d2911849233347f18f | refs/heads/develop | 2022-07-25T20:53:02.234255 | 2019-02-19T12:22:56 | 2019-02-19T12:22:56 | 22,357,685 | 2 | 3 | NOASSERTION | 2022-07-06T19:15:17 | 2014-07-28T20:58:11 | Python | UTF-8 | Python | false | false | 561 | py | import md5
from urllib import urlopen, urlencode
# get gravatar for email address
def get_gravatar(email, size=None, default=None, border=None):
email = email.lower().strip()
hash = md5.md5(email).hexdigest()
args = {'gravatar_id':hash}
if size and 1 <= int(size) <= 512:
args['size'] = size
if default: args['default'] = default
if border: args['border'] = border
url = 'http://www.gravatar.com/avatar.php?' + urlencode(args)
response = urlopen(url)
image = response.read()
response.close()
return image
| [
"richard@cottagelabs.com"
] | richard@cottagelabs.com |
5d4b06edc18e7ecfc51bc077f3030647ce116c1b | 2b4790d77439d89ad27bdd04bac539283f0dd605 | /basic_ex/04-dict_set.py | 50714d6dfd354fa8ed35de05c6a6e8429b1ea9b4 | [] | no_license | ajioy/python-ex | 9fde4bcfe35edeee5050365660a03bdb6b913da1 | 982a3cdf0de0e140faa4cb539f2961b311de2c2a | refs/heads/master | 2020-04-05T14:06:09.909935 | 2018-08-14T14:43:55 | 2018-08-14T14:43:55 | 59,105,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
d = {'Ajioy': 95, 'Bob': 92, 'Cheer': 23}
print d['Ajioy']
d['Bob'] = 88
print d
print 'Hello' in d # False
print d.get('Ajioy')
print d.get('Hello') # none
print d.get('Hello', -1) # 默认值
print d.pop('Bob')
print d
s = set([1,2,3])
print s
s.add(4)
print s
s.remove(4)
print s
s1 = set([1,2,3])
s2 = set([2,3,4])
print s1 & s2
print s1 | s2
| [
"ajioy@hotmail.com"
] | ajioy@hotmail.com |
8ef7d8f24145710e0e98904312b2cf4afd494074 | cd257631f442d24d2e4902cfb60d05095e7c49ad | /week-05/RPG-Game_project(Elder_ScrollsThe_Wanderer)/level_map.py | ff9876c6dd89053b211d1c2b1c06d918d35ca61f | [] | no_license | green-fox-academy/Chiflado | 62e6fc1244f4b4f2169555af625b6bfdda41a975 | 008893c63a97f4c28ff63cab269b4895ed9b8cf1 | refs/heads/master | 2021-09-04T03:25:25.656921 | 2018-01-15T09:02:47 | 2018-01-15T09:02:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | class Map:
def __init__(self):
self.level_map = [[0, 0, 0, 0, 0, 1, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0,],
[1, 0, 1, 1, 1, 1, 0, 1, 0, 0,],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 1,],
[0, 0, 1, 0, 0, 0, 0, 1, 0, 0,],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0,],
[1, 0, 1, 0, 0, 1, 0, 0, 0, 0,],
[1, 0, 1, 1, 1, 1, 0, 1, 1, 1,],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1,]]
def get_cell(self, x, y):
if 0 <= x <= 9 and 0 <= y <= 9:
if self.level_map[y][x] == 0:
return True | [
"prjevarabalazs@gmail.com"
] | prjevarabalazs@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.