blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ad05c517d35ad2ec7ca82834e97cbcb70bd8bad7 | acdb83652ea9405a56c2f0257f867a0b146a47c2 | /lib/Flask/model/Okapi BM25/bm25.py | dc125367d1fe83eec4cb519fee783bfe7731b065 | [] | no_license | latruonghai/query | 01561b4e23e153b06d0a0426377369a33212e4aa | 3d415b16966125015d66c449e3d56a208dfc430b | refs/heads/master | 2023-02-24T08:34:23.121351 | 2021-01-24T22:18:51 | 2021-01-24T22:18:51 | 300,475,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,414 | py | import re
import math
from nltk.stem import PorterStemmer
import os
from pyvi import ViTokenizer, ViPosTagger
import pickle
class QueryParsers:
    """Parse a raw query string into a stopword-filtered, stemmed token list.

    The resulting tokens are exposed as ``self.query`` and are meant to be
    matched against the stemmed document index built by ``BuildIndex``.
    """

    # NOTE(review): hard-coded, machine-specific stopword dictionary path.
    DICT_PATH = r'D:\src\InforRetri\proj\pro_dictionary.txt'

    def __init__(self, file):
        # `file` is actually the raw query text, not a path.
        self.filename = file
        self.query = self.get_queries()

    def get_queries(self):
        """Tokenize (pyvi), lowercase, drop stopwords, then stem the query."""
        with open(self.DICT_PATH, encoding='utf8') as fobj:
            stop_words = fobj.read().split()
        tokens = ViTokenizer.tokenize(self.filename).lower().split()
        kept = [word for word in tokens if word not in stop_words]
        # Stem so the query terms match the stemmed document terms.
        # (The original built the stemmed list but returned the unstemmed
        # one, so stemmed documents could never match the query.)
        stemmer = PorterStemmer()
        return [stemmer.stem(w) for w in kept]
class BuildIndex:
    """Build an inverted index over a document collection and rank the
    documents against a query with the Okapi BM25 scoring function."""

    # Standard BM25 free parameters: b controls document-length
    # normalisation, k controls term-frequency saturation.
    b = 0.75
    k = 1.2

    def __init__(self, files, que):
        """files: list of document paths; que: raw query string."""
        self.tf = {}
        self.df = {}
        self.filenames = files
        self.file_to_terms = self.process_files()
        self.regdex = self.regular_index(self.file_to_terms)
        self.invertedIndex = self.inverted_index()
        self.dltable = self.docLtable()
        self.dl = self.docLen()
        self.avgdl = self.avgdocl()
        self.N = self.doc_n()
        self.idf = self.inverse_df()
        q = QueryParsers(que)
        query = q.query
        self.total_score = self.BM25scores(query)
        self.rankedDocs = self.ranked_docs()

    def process_files(self):
        '''
        input: filenames
        output: a dictionary, with filename as key, and its term list as value
        '''
        file_to_terms = {}
        stemmer = PorterStemmer()
        for file in self.filenames:
            # Read the whole document lowercased, tokenize on whitespace,
            # then stem every token.  `with` closes the handle (the
            # original leaked one open file per document).
            with open(file, 'r', encoding="utf8") as fh:
                words = fh.read().lower().split()
            file_to_terms[file] = [stemmer.stem(w) for w in words]
        return file_to_terms

    def doc_n(self):
        '''
        return the number of docs in the collection
        '''
        return len(self.file_to_terms)

    def index_one_file(self, termlist):
        '''
        Map each word of one document to the list of positions it occurs at.
        input: termlist of a document.
        output: a dictionary with word as key, positions as value.
        '''
        fileIndex = {}
        for index, word in enumerate(termlist):
            fileIndex.setdefault(word, []).append(index)
        return fileIndex

    def regular_index(self, termlists):
        '''
        output: a dictionary. key: filename,
        value: a dictionary with word as key, positions as value
        '''
        return {filename: self.index_one_file(terms)
                for filename, terms in termlists.items()}

    def inverted_index(self):
        '''
        output: dictionary. key: word,
        value: a dictionary, key is filename, value is its term positions in
        that file.  Also fills self.tf (per-document term counts) and
        self.df (number of documents containing each word).
        '''
        total_index = {}
        regdex = self.regdex
        for filename in regdex.keys():
            self.tf[filename] = {}
            for word in regdex[filename].keys():
                # tf: raw count of this word in this document.
                self.tf[filename][word] = len(regdex[filename][word])
                # df: number of documents containing the word.
                self.df[word] = self.df.get(word, 0) + 1
                if word in total_index:
                    if filename in total_index[word]:
                        total_index[word][filename].extend(regdex[filename][word])
                    else:
                        total_index[word][filename] = regdex[filename][word]
                else:
                    total_index[word] = {filename: regdex[filename][word]}
        return total_index

    def docLtable(self):
        '''
        output: dict, key: word,
        value: dict(key: number of docs containing that word, value: total_freq)
        '''
        dltable = {}
        for w in self.invertedIndex.keys():
            total_freq = 0
            for file in self.invertedIndex[w].keys():
                total_freq += len(self.invertedIndex[w][file])
            dltable[w] = {len(self.invertedIndex[w].keys()): total_freq}
        return dltable

    def docLen(self):
        '''
        return a dict, key: filename, value: document length (token count)
        '''
        return {file: len(self.file_to_terms[file]) for file in self.filenames}

    def avgdocl(self):
        """Average document length over the collection."""
        return sum(self.dl.values()) / len(self.dl)

    def inverse_df(self):
        '''
        output: inverse doc freq with key: word, value: idf
        '''
        idf = {}
        for w in self.df.keys():
            # Simplified idf variant; the commented form below is the
            # classic BM25 idf.
            # idf[w] = math.log((self.N - self.df[w] + 0.5)/(self.df[w] + 0.5))
            idf[w] = math.log((self.N + 1) / self.df[w])
        return idf

    def get_score(self, filename, qlist):
        '''
        filename: filename
        qlist: term list of the query
        output: the BM25 score for that document.

        Iterates over the *unique* query terms.  The original looped over
        every token of the document, adding the per-term contribution once
        per occurrence, which effectively weighted term frequency twice.
        '''
        score = 0
        for w in set(qlist):
            postings = self.invertedIndex.get(w)
            # Skip query terms absent from the collection or this document.
            if not postings or filename not in postings:
                continue
            wc = len(postings[filename])
            score += self.idf[w] * (wc * (self.k + 1)) / (
                wc + self.k * (1 - self.b + self.b * self.dl[filename] / self.avgdl))
        return score

    def BM25scores(self, qlist):
        '''
        output: a dictionary with filename as key, score as value
        '''
        return {doc: self.get_score(doc, qlist) for doc in self.file_to_terms.keys()}

    def ranked_docs(self):
        """Top five (filename, score) pairs, best score first."""
        ranked_docs = sorted(self.total_score.items(),
                             key=lambda x: x[1], reverse=True)
        return ranked_docs[:5]
# MAIN: index every document under ./Dataset and rank it against a query.
lst_docs_paths = "./Dataset"
lst_docs = []
for r, s, f in os.walk(lst_docs_paths):
    for file in f:
        # Join against the directory currently being walked (`r`), not the
        # root: the original concatenated the root for every file, which
        # produced wrong paths for documents inside subdirectories.
        file_pth = os.path.join(r, file)
        lst_docs.append(file_pth)
query = 'du lịch giá rẻ'
s = BuildIndex(lst_docs, query)
query = QueryParsers(query)
print("QUERY: ", query.query)
print(s.rankedDocs)
| [
"="
] | = |
ff8119939df8debd77ff8686cfe9e8e79098e1dd | e4624cc8b073941e7c66eac77ac7ca1ef989cd37 | /protests/migrations/0002_auto_20200925_1922.py | f76c3b41c917cc5582d44689e6c1a9722c081f99 | [
"MIT"
] | permissive | mentix02/revolve | a7f9ed4626c626d45a93de589857fe237ab9ca18 | 59e3c2999bb511515478e20edf80556665de6c67 | refs/heads/master | 2022-12-18T16:33:13.582607 | 2020-09-26T10:49:16 | 2020-09-26T10:49:16 | 298,392,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | # Generated by Django 3.1.1 on 2020-09-25 13:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the nullable `organizer` foreign key and a slug index to the
    # Protest model.
    # NOTE(review): `initial = True` is unusual for a migration that
    # depends on '0001_initial' — confirm this flag is intended.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('protests', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='protest',
            name='organizer',
            field=models.ForeignKey(
                blank=True,
                null=True,
                # Keep the protest row if its organizer account is deleted.
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='organized_protests',
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddIndex(
            model_name='protest',
            # Index for slug-based lookups.
            index=models.Index(fields=['slug'], name='protests_pr_slug_1f2372_idx'),
        ),
    ]
| [
"manan.yadav02@gmail.com"
] | manan.yadav02@gmail.com |
# Check whether `string` is a palindrome by comparing mirrored characters.
# Converted to consistent Python 3 syntax: the original mixed Python 2
# `print x` statements with a Python 3 `print(...)` call, which is a
# SyntaxError under Python 3.
string = 'Madhav'
result = True
str_len = len(string)
print(str_len)
half_len = int(str_len / 2)
print(half_len)
for i in range(0, half_len):
    # you need to check only half of the string
    print(string[str_len - i - 1])
    if string[i] != string[str_len - i - 1]:
        result = False
        break
print(result)
"madhav.malhotra3089@gmail.com"
] | madhav.malhotra3089@gmail.com |
class Cat:
    """
    define a Cat class
    """

    def __init__(self, new_name, new_age):
        # Store the cat's identity.
        self.name = new_name
        self.age = new_age

    def __str__(self):
        return "{} is {} year old".format(self.name, self.age)

    # behaviour methods
    def eat(self):
        print("eating fish")

    def drink(self):
        print("drinking cocacola")

    def introduce(self):
        print("{} is {} year old".format(self.name, self.age))
# Quick demo of the Cat class.
tom = Cat("tom", 12)
black_captain = Cat("black_captain", 25)
print(tom)
print(black_captain)
# introduce() already prints; the original wrapped it in print(), which
# additionally printed its None return value as a stray line.
tom.introduce()
| [
"chunqiu1xia@gmail.com"
] | chunqiu1xia@gmail.com |
2cf4e87b3e0de4adc8a89119092452af59562e54 | dea64384db1d70c65dba9bf41cced4c2b78a1738 | /addLine.py | c97e22a3158a401c9e3b110fd4d630428bfb2b19 | [] | no_license | jhakala/Vg | bed362bc43bff1ec68849b426e3a014ec95542e6 | 771189e67702118e08961e8fbffdefa2c7de57c7 | refs/heads/master | 2020-12-07T13:45:17.273736 | 2017-12-12T00:15:00 | 2017-12-12T00:15:00 | 52,993,022 | 0 | 0 | null | 2016-03-02T20:11:20 | 2016-03-02T20:11:20 | null | UTF-8 | Python | false | false | 1,018 | py | from sys import argv
from os import path
from ROOT import *
# Legacy Python 2 script (uses `print` statements): draws dashed guide
# lines at y = +/-0.5 on two bias canvases taken from a ROOT file and
# saves each canvas as a PDF.
if not len(argv) == 2:
  print "Please enter the input filename."
  exit(1)
if not path.exists(argv[1]):
  print "Input file not found."
  exit(1)
# Tag the output files by b-tag category, inferred from the input filename.
category = "antibtag" if "antibtag" in argv[1] else "btag"
inFile = TFile(argv[1])
cans=[]
# (label, canvas) pairs: "p2" is the means plot, "c2a" the medians plot.
cans.append(("means"   , inFile.Get("p2")))
cans.append(("medians" , inFile.Get("c2a")))
outFile = TFile("lineAdded_%s" % argv[1], "RECREATE")
# The TLine objects are kept in module-level lists so they outlive each
# loop iteration (presumably so ROOT keeps drawing them — confirm).
topLines = []
bottomLines = []
rangeHi = 3600
rangeLow = 500
for can in cans:
  can[1].Draw()
  can[1].cd()
  topLines.append(TLine(rangeLow, 0.5, rangeHi, 0.5))
  bottomLines.append(TLine(rangeLow, -0.5, rangeHi, -0.5))
  topLines[-1].SetLineStyle(2)  # dashed
  topLines[-1].Draw()
  bottomLines[-1].SetLineStyle(2)  # dashed
  bottomLines[-1].Draw()
  print "drew lines on canvas", can[1].GetName()
  can[1].GetPrimitive("hframe").GetXaxis().SetRangeUser(rangeLow, rangeHi)
  can[1].GetPrimitive("hframe").GetYaxis().SetTitleOffset(1.2)
  can[1].Print("bias_%s_%s.pdf" % (can[0], category))
| [
"john_hakala@brown.edu"
] | john_hakala@brown.edu |
14aa102d9e3ed79e50dcc29dbb5e082befb790ce | b685036280331fa50fcd87f269521342ec1b437b | /src/data_mining_demo/machine_learning_in_action/chapter3_page62.py | f3d10ff28a9cb7fcdf8d900286d6a1bc2cf9284b | [] | no_license | chenqing666/myML_DM_Test | f875cb5b2a92e81bc3de2a0070c0185b7eacac89 | 5ac38f7872d94ca7cedd4f5057bb93732b5edbad | refs/heads/master | 2022-02-26T01:52:06.293025 | 2019-09-20T06:35:25 | 2019-09-20T06:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | import matplotlib.pyplot as plt
# Box styles for the two node kinds and the shared arrow style, all passed
# to matplotlib's annotate().
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
# NOTE(review): fc="22" looks like a typo (matplotlib gray levels are
# strings like "0.2") — confirm the intended face color.
leafNode = dict(boxstyle="round4", fc="22")
arrow_args = dict(arrowstyle="<-")
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    """
    Draw one annotated node with an arrow from parentPt to centerPt.
    :param nodeTxt: annotation text shown inside the node box
    :param centerPt: arrow end position (where the text box is drawn)
    :param parentPt: arrow start position
    :param nodeType: box style dict (decisionNode or leafNode)
    :return:
    """
    # Uses the axes stashed on the createPlot function object by createPlot().
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)
def createPlot():
    # Demo: create a fresh figure and draw one decision node and one leaf.
    fig = plt.figure(1, facecolor='white')
    fig.clf() # clear figure
    axprops = dict(xticks=[], yticks=[])  # only used by the commented variant below
    # createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) #no ticks
    # Store the axes on the function object so plotNode() can reach them.
    createPlot.ax1 = plt.subplot(111, frameon=True) # ticks kept for demo purposes
    plotNode("dd", (.5, .1), (.1, .5), decisionNode)
    plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
    plt.show()
createPlot()
| [
"daijitao@ctsi.com.cn"
] | daijitao@ctsi.com.cn |
96acea6c4d014e3384d4217b80831e54028c5ac2 | 6b0192385b791598640867a4c2aedc2da840f784 | /app/models.py | 2ba03ae12c495b11fb20bcc65affc9c6b354546d | [
"MIT"
] | permissive | Albert-Byrone/corruption-feeds | 14e2bc6b83ea758d1a84d87633dbc43995a74db7 | 83ef3fa680acf23079d25debaff461a38b6cb386 | refs/heads/master | 2022-10-12T04:14:25.413542 | 2019-11-01T06:21:54 | 2019-11-01T06:21:54 | 218,772,077 | 0 | 3 | MIT | 2022-09-16T18:12:47 | 2019-10-31T13:26:09 | Python | UTF-8 | Python | false | false | 3,407 | py | from flask_login import current_user,UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
from datetime import datetime
from . import db,login_manager
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: reload the logged-in user from the session id.
    return User.query.get(user_id)
class User(UserMixin,db.Model):
    """An account that can post cases, comments and upvotes."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key=True)
    username = db.Column(db.String(255),unique=True,nullable=False)
    email = db.Column(db.String(255),unique = True,nullable = False)
    bio = db.Column(db.String(255),default='My default Bio')
    nickname = db.Column(db.String(255),unique=True,nullable=True)
    # location = db.Column(db.String(255),unique=True,nullable=True)
    profile_pic_path = db.Column(db.String(255),default='default.png')
    # Only the werkzeug hash is stored; see the `password` property below.
    hashed_password = db.Column(db.String(255),nullable=False)
    case = db.relationship('Case',backref='user',lazy='dynamic')
    comment = db.relationship('Comment',backref='user',lazy='dynamic')
    upvote = db.relationship('Upvote',backref='user',lazy='dynamic')
    @property
    def password(self):
        # The plaintext password is write-only.
        raise AttributeError('You cannot read the password attribute')
    @password.setter
    def password(self,password):
        self.hashed_password=generate_password_hash(password)
    def verify_password(self,password):
        # Check a login attempt against the stored hash.
        return check_password_hash(self.hashed_password,password)
    def save_user(self):
        # Persist this user.
        db.session.add(self)
        db.session.commit()
    def __repr__(self):
        return f"User {self.username}"
class Case(db.Model):
    """A reported corruption case posted by a user."""
    __tablename__='cases'
    id = db.Column(db.Integer,primary_key=True)
    title = db.Column(db.String(255),nullable=False)
    content = db.Column(db.Text(),nullable=False)
    posted = db.Column(db.DateTime,default = datetime.utcnow)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    comment = db.relationship('Comment',backref='case',lazy='dynamic')
    upvote = db.relationship('Upvote',backref='case',lazy='dynamic')
    def save_cases(self):
        """Persist this case."""
        db.session.add(self)
        db.session.commit()
    @classmethod
    def get_case(cls,id):
        """Return the case with the given primary key, or None."""
        cases = Case.query.get(id)
        return cases
    def __repr__(self):
        # Fixed: the original formatted ``self.post``, an attribute that
        # Case never defines, so repr() raised AttributeError.
        return f'Case {self.title}'
class Comment(db.Model):
    """A user's comment on a case."""
    __tablename__='comments'
    id = db.Column(db.Integer,primary_key=True)
    comment = db.Column(db.Text(),nullable = False)
    posted = db.Column(db.DateTime,default = datetime.utcnow)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    case_id = db.Column(db.Integer,db.ForeignKey('cases.id'))
    def save_comments(self):
        """Persist this comment."""
        db.session.add(self)
        db.session.commit()
    @classmethod
    def get_comments(cls,case_id):
        """Return all comments attached to the given case."""
        comments = Comment.query.filter_by(case_id=case_id).all()
        return comments
    def __repr__(self):
        # Fixed: the original formatted ``self.post`` (an attribute that
        # does not exist on Comment) and mislabeled the object as "Case".
        return f'Comment {self.comment}'
class Upvote(db.Model):
    """A single user's upvote on a case (one row per user/case pair)."""
    __tablename__='upvotes'
    id = db.Column(db.Integer,primary_key=True)
    user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
    case_id = db.Column(db.Integer,db.ForeignKey('cases.id'))
    def save_votes(self):
        # Persist this vote.
        db.session.add(self)
        db.session.commit()
    @classmethod
    def get_votes(cls,id):
        # All votes for the given case id.
        votes = Upvote.query.filter_by(case_id=id).all()
        return votes
    def __repr__(self):
        return f'{self.user_id}:{self.case_id}'
| [
"albertbyrone1677@gmail.com"
] | albertbyrone1677@gmail.com |
b1a906eb52a85db92cdc55bd8f3e89853c2de98c | 7502ef2b7a6d4b4c9b789b0042e7b69024ffcc91 | /mysite/polls/views.py | 67e3609ad07cf9b36ed9cc99e730f67ae8811c2e | [] | no_license | CodingNomads/django-polls | ec0aa6d99971e03bac4a5fcddb08611c3c3dc1d0 | c82ab82d2bcffa7fb70d2cbfe5b9e4e059553a40 | refs/heads/master | 2021-12-13T16:07:01.203035 | 2021-08-11T15:35:48 | 2021-08-11T15:35:48 | 244,676,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
    # Landing page: lists the five most recently published questions.
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """
        Return the last five published questions (not including those set to be
        published in the future).
        """
        return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    # Voting form for a single question.
    model = Question
    template_name = 'polls/detail.html'
    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    # Read-only results page for a single question.
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a POSTed vote for one of the question's choices."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form including an error message.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # NOTE(review): read-modify-write without F() is race-prone under
        # concurrent votes — confirm whether that matters here.
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"breuss.martin@gmail.com"
] | breuss.martin@gmail.com |
9c9107d0837a28fefe69b8a4bd7eb009bb12d1d6 | 6609c26b4ed72c156104ce282c3cf88c6aac59f6 | /chapter17/examples/advance_features.py | c985a4f23c81b33511c5f99681a5ca31ba3a8b13 | [
"MIT"
] | permissive | yordanivh/intro_to_cs_w_python | 4ab9dbbc2963b285b22cacb6648d1300fded18ce | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | refs/heads/master | 2020-09-06T12:25:23.362118 | 2020-02-14T14:07:07 | 2020-02-14T14:07:07 | 220,423,698 | 0 | 0 | MIT | 2020-02-14T14:07:08 | 2019-11-08T08:41:25 | Python | UTF-8 | Python | false | false | 2,808 | py | #aggregation
>>> cur.execute('SELECT SUM (Population) FROM PopByRegion')
<sqlite3.Cursor object at 0x7f88a81907a0>
>>> cur.fetchone()
(8965762,)
#grouping
>>> cur.execute('''SELECT Region, SUM (Population) FROM PopByCountry GROUP BY Region''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia', 1364389), ('North America', 661200)]
>>> cur.execute('''SELECT SUM (Population) FROM PopByCountry WHERE Region = "North America"''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[(661200,)]
>>> cur.execute('''SELECT SUM (Population) FROM PopByCountry WHERE Region = "Eastern Asia"''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[(1364389,)]
#self-joins
>>> cur.execute('''
... SELECT A.Country, B.Country
... FROM PopByCountry A INNER JOIN PopByCountry B
... WHERE (ABS(A.Population - B.Population) <=1000)
... AND (A.Country != B.Country)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Republic of Korea', 'Canada'), ('Bahamas', 'Greenland'), ('Canada', 'Republic of Korea'), ('Greenland', 'Bahamas')]
>>> cur.execute('''
... SELECT A.Country, B.Country
... FROM PopByCountry A INNER JOIN PopByCountry B
... WHERE (ABS(A.Population - B.Population) <=1000)
... AND (A.Country < B.Country)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Bahamas', 'Greenland'), ('Canada', 'Republic of Korea')]
#Nested Queries
>>> cur.execute('''SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population != 8764)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia',), ('North America',)]
>>> cur.execute('''SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population != 8764)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia',), ('North America',)]
>>> cur.execute('''
... SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population = 8764)
... ''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia',)]
>>> cur.execute('''
... SELECT DISTINCT Region
... FROM PopByCountry
... WHERE Region NOT IN
... (SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population = 8764))
... ''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('North America',)]
# Transactions
# NOTE: execute() takes its parameters as one sequence, and commit() is a
# method of the Connection (here `con`), not of the Cursor.
>>>cur.execute('SELECT SignedOut FROM Books WHERE ISBN = ?', (isbn,))
>>>signedOut = cur.fetchone()[0]
>>>cur.execute('''UPDATE Books SET SignedOut = ?
... WHERE ISBN = ?''', (signedOut + 1, isbn))
>>>con.commit()
>>>cur.execute('SELECT SignedOut FROM Books WHERE ISBN = ?', (isbn,))
>>>signedOut = cur.fetchone()[0]
>>>cur.execute('''UPDATE Books SET SignedOut = ?
... WHERE ISBN = ?''', (signedOut - 1, isbn))
>>>con.commit()
| [
"yordan@hashicorp.com"
] | yordan@hashicorp.com |
4cead7c664d102e7b8701b6679c11251c93f3262 | 5b9035dbfe0750e9933728f9631ad7a183dd3429 | /02/00/iterdecode.py | d501e5c1c4f91e322d041afbce2e925b84f80cc2 | [
"CC0-1.0"
] | permissive | pylangstudy/201709 | 271efbd4f337d912d0ca958a621eb2a040091528 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | refs/heads/master | 2021-01-21T12:16:21.950493 | 2017-09-30T00:02:34 | 2017-09-30T00:02:34 | 102,058,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!python3.6
#encoding:utf-8
import codecs
def byte_iter():
    """Yield five UTF-8 encoded sample strings, one bytes chunk at a time."""
    for index in range(5):
        yield f'日本語_{index}'.encode()
# Incrementally decode the byte chunks back into text.
for decoded in codecs.iterdecode(byte_iter(), encoding='utf-8'):
    print(decoded, decoded.encode())
| [
"pylangstudy@yahoo.co.jp"
] | pylangstudy@yahoo.co.jp |
0b2eb5a06ed674ebafc5ebd22ced1e6d9153f00e | 8fb1d41797595550418ecfc0e7558f38254b4606 | /django/contrib/gis/geos/point.py | 1b7d7f23ed51892d5d963e44d8a40d42147743a0 | [
"BSD-3-Clause",
"MIT"
] | permissive | hunch/hunch-gift-app | 2aad70a9f18124bf0de02d7a125fa93c765da008 | 8c7cad24cc0d9900deb4175e6b768c64a3d7adcf | refs/heads/master | 2016-09-06T03:13:52.153974 | 2012-03-26T18:11:59 | 2012-03-26T18:11:59 | 1,191,221 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,388 | py | from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
class Point(GEOSGeometry):
    # A GEOS point geometry; its coordinates live in a C coordinate
    # sequence exposed as self._cs.
    # NOTE: Python 2-era code (`long`, `xrange`, `i.next()`).
    _minlength = 2
    _maxlength = 3
    def __init__(self, x, y=None, z=None, srid=None):
        """
        The Point object may be initialized with either a tuple, or individual
        parameters.
        For Example:
        >>> p = Point((5, 23)) # 2D point, passed in as a tuple
        >>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
        """
        if isinstance(x, (tuple, list)):
            # Here a tuple or list was passed in under the `x` parameter.
            ndim = len(x)
            coords = x
        elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
            # Here X, Y, and (optionally) Z were passed in individually, as parameters.
            if isinstance(z, (int, float, long)):
                ndim = 3
                coords = [x, y, z]
            else:
                ndim = 2
                coords = [x, y]
        else:
            raise TypeError('Invalid parameters given for Point initialization.')
        point = self._create_point(ndim, coords)
        # Initializing using the address returned from the GEOS
        # createPoint factory.
        super(Point, self).__init__(point, srid=srid)
    def _create_point(self, ndim, coords):
        """
        Create a coordinate sequence, set X, Y, [Z], and create point
        """
        if ndim < 2 or ndim > 3:
            raise TypeError('Invalid point dimension: %s' % str(ndim))
        # One coordinate with `ndim` ordinates.
        cs = capi.create_cs(c_uint(1), c_uint(ndim))
        i = iter(coords)
        capi.cs_setx(cs, 0, i.next())
        capi.cs_sety(cs, 0, i.next())
        if ndim == 3: capi.cs_setz(cs, 0, i.next())
        return capi.create_point(cs)
    def _set_list(self, length, items):
        # Replace this point's coordinates wholesale (slice-style assignment).
        ptr = self._create_point(length, items)
        if ptr:
            # Free the old GEOS geometry before adopting the new pointer.
            capi.destroy_geom(self.ptr)
            self._ptr = ptr
            self._set_cs()
        else:
            # can this happen?
            raise GEOSException('Geometry resulting from slice deletion was invalid.')
    def _set_single(self, index, value):
        self._cs.setOrdinate(index, 0, value)
    def __iter__(self):
        "Allows iteration over coordinates of this Point."
        for i in xrange(len(self)):
            yield self[i]
    def __len__(self):
        "Returns the number of dimensions for this Point (either 0, 2 or 3)."
        if self.empty: return 0
        if self.hasz: return 3
        else: return 2
    def _get_single_external(self, index):
        # Index-based ordinate access used by the sequence protocol.
        if index == 0:
            return self.x
        elif index == 1:
            return self.y
        elif index == 2:
            return self.z
    _get_single_internal = _get_single_external
    def get_x(self):
        "Returns the X component of the Point."
        return self._cs.getOrdinate(0, 0)
    def set_x(self, value):
        "Sets the X component of the Point."
        self._cs.setOrdinate(0, 0, value)
    def get_y(self):
        "Returns the Y component of the Point."
        return self._cs.getOrdinate(1, 0)
    def set_y(self, value):
        "Sets the Y component of the Point."
        self._cs.setOrdinate(1, 0, value)
    def get_z(self):
        "Returns the Z component of the Point, or None for a 2D point."
        if self.hasz:
            return self._cs.getOrdinate(2, 0)
        else:
            return None
    def set_z(self, value):
        "Sets the Z component of the Point."
        if self.hasz:
            self._cs.setOrdinate(2, 0, value)
        else:
            raise GEOSException('Cannot set Z on 2D Point.')
    # X, Y, Z properties
    x = property(get_x, set_x)
    y = property(get_y, set_y)
    z = property(get_z, set_z)
    ### Tuple setting and retrieval routines. ###
    def get_coords(self):
        "Returns a tuple of the point."
        return self._cs.tuple
    def set_coords(self, tup):
        "Sets the coordinates of the point with the given tuple."
        self._cs[0] = tup
    # The tuple and coords properties
    tuple = property(get_coords, set_coords)
    coords = tuple
| [
"gleitz@hunch.com"
] | gleitz@hunch.com |
b7a9a1cf49e61232166b8257b59f9ac4df425cd5 | 0689ad04900b45e6ffb85756e65e96f30781558b | /pbase/day19/shili/mylist.py | 2fc8cc1d6d64d39221a007ecd54b4a6c488d1ecc | [] | no_license | lizhihui16/aaa | a5452b5d0de4c2ad6342fce1b8aef278d2d2943e | e8c38e012f6aa0bc05ac6481d6c3e2b4e9013b56 | refs/heads/master | 2020-04-24T01:05:19.266060 | 2019-02-20T01:43:51 | 2019-02-20T01:43:51 | 171,586,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py |
class MyList:
    """A custom list-like container that stores its items in an internal
    Python list."""

    def __init__(self, iterable=()):
        # Copy the input so the container owns its own storage.
        self.data = list(iterable)

    def __repr__(self):
        return 'MyList(%s)' % self.data

    def __len__(self):
        # __len__ must return an integer.
        return len(self.data)

    def __abs__(self):
        """Return a new MyList holding the absolute value of every element."""
        return MyList(abs(item) for item in self.data)
# Demo: build a MyList, show its repr, its length and an absolute-value copy.
myl = MyList([1, -2, 3, -4])
print(myl)
print(len(myl))
myl3 = abs(myl)
print(myl3)
# print(myl2) | [
"tarena@tedu.cn"
] | tarena@tedu.cn |
a286e36d05932f1d3e1eaefcc2ab40f45bb14270 | a6355ef8ddb4d31fb4ff45ae755f34482d8c0ff9 | /supervised/tuner/registry.py | 52b5099b6f1150df1f306046e536db9fdea3bc00 | [
"MIT"
] | permissive | michaelneale/mljar-supervised | d4d1b44f4cd5dcbdb36768c5186f2480a53ec3f7 | 8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f | refs/heads/master | 2022-02-06T14:11:21.377791 | 2019-11-05T08:15:02 | 2019-11-05T08:15:02 | 220,161,447 | 0 | 0 | MIT | 2022-01-26T00:32:56 | 2019-11-07T05:51:34 | null | UTF-8 | Python | false | false | 754 | py | # tasks that can be handled by the package
# Machine-learning tasks that can be handled by the package.
BINARY_CLASSIFICATION = "binary_classification"
MULTICLASS_CLASSIFICATION = "multiclass_classification"
REGRESSION = "regression"


class ModelsRegistry:
    """Registry mapping task name -> algorithm short name -> model info."""

    registry = {
        BINARY_CLASSIFICATION: {},
        MULTICLASS_CLASSIFICATION: {},
        REGRESSION: {},
    }

    @staticmethod
    def add(task_name, model_class, model_params, required_preprocessing, additional):
        """Register *model_class* under its ``algorithm_short_name``."""
        ModelsRegistry.registry[task_name][model_class.algorithm_short_name] = {
            "class": model_class,
            "params": model_params,
            "required_preprocessing": required_preprocessing,
            "additional": additional,
        }
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
f0ec882c3142a7b3f7479f8dea631fea8827f88a | 20741a0e27b88eb4396516788c1dd7acab7f1527 | /project/apps/config/models.py | 832fa099b90b4e90e478dff781c5f2ac9bc25537 | [] | no_license | yueyoum/lockscreen-image | 2b371f5e133f0e3ace7b3b4597a2633575027415 | 47351c5aafbd97e3c6862798a9666114c83f4fb9 | refs/heads/master | 2021-01-23T10:04:35.901591 | 2015-06-30T08:00:51 | 2015-06-30T08:00:51 | 29,383,019 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
from django.db import models
class Config(models.Model):
    """Simple key/value configuration store backed by `project_config`."""
    name = models.TextField(primary_key=True)
    value = models.TextField()
    class Meta:
        db_table = 'project_config'
        verbose_name = '设置'
        verbose_name_plural = '设置'
    @classmethod
    def get_value(cls, key):
        # Missing keys resolve to an empty string instead of raising.
        try:
            return cls.objects.get(name=key).value
        except cls.DoesNotExist:
            return ''
| [
"yueyoum@gmail.com"
] | yueyoum@gmail.com |
1d9d95820ff83069e478a3b37183e8ded7518493 | 56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a | /applications/CoSimulationApplication/python_scripts/colors.py | 2269c3f01716035217f3aa062b303ce8ae2b8230 | [
"BSD-3-Clause"
PRINT_COLORS = False  # Global switch: emit ANSI colour codes only when True.


def color_string(string2color, color_code):
    """Wrap *string2color* in the given ANSI SGR code when colours are on."""
    if not PRINT_COLORS:
        return string2color
    return "\x1b[" + color_code + "m" + str(string2color) + "\x1b[0m"
# Thin wrappers over color_string(): each applies one ANSI SGR code.
def bold(string2color):
    return color_string(string2color, "1;1")
def italic(string2color):
    return color_string(string2color, "1;3")
def darkify(string2color):
    return bold(color_string(string2color, "1;2")) # bold is needed bcs it is removed otherwise
def underline(string2color):
    return color_string(string2color, "1;4")
# Base colours and their "dark" variants (dim + bold).
def blue(string2color):
    return color_string(string2color, "1;34")
def darkblue(string2color):
    return (darkify(blue(string2color)))
def red(string2color):
    return color_string(string2color, "1;31")
def darkred(string2color):
    return (darkify(red(string2color)))
def green(string2color):
    return color_string(string2color, "1;32")
def darkgreen(string2color):
    return (darkify(green(string2color)))
def yellow(string2color):
    return color_string(string2color, "1;33")
def darkyellow(string2color):
    return (darkify(yellow(string2color)))
def cyan(string2color):
    return color_string(string2color, "1;36")
def darkcyan(string2color):
    return (darkify(cyan(string2color)))
def magenta(string2color):
    return color_string(string2color, "1;35")
def darkmagenta(string2color):
    return (darkify(magenta(string2color)))
if __name__ == "__main__":
print("printing all color options:\n")
str2print = "MyCustomString"
PRINT_COLORS = True
print("print:", str2print)
print("bold:", bold(str2print))
print("italic:", italic(str2print))
print("darkify:", darkify(str2print))
print("underline:", underline(str2print))
print("blue:", blue(str2print))
print("darkblue:", darkblue(str2print))
print("red:", red(str2print))
print("darkred:", darkred(str2print))
print("green:", green(str2print))
print("darkgreen:", darkgreen(str2print))
print("yellow:", yellow(str2print))
print("darkyellow:", darkyellow(str2print))
print("cyan:", cyan(str2print))
print("darkcyan:", darkcyan(str2print))
print("magenta:", magenta(str2print))
print("darkmagenta:", darkmagenta(str2print))
| [
"philipp.bucher@tum.de"
] | philipp.bucher@tum.de |
71b166eca1b5d4658c1a7725081ee16a782a7cbd | 98032c5363d0904ba44e1b5c1b7aa0d31ed1d3f2 | /Chapter07/ch7/json_examples/json_cplx.py | 62a571ee8e31bf133df4dcb1f2adf45092c8410c | [
"MIT"
] | permissive | PacktPublishing/Learn-Python-Programming-Second-Edition | 7948b309f6e8b146a5eb5e8690b7865cb76136d5 | 54fee44ff1c696df0c7da1e3e84a6c2271a78904 | refs/heads/master | 2023-05-12T08:56:52.868686 | 2023-01-30T09:59:05 | 2023-01-30T09:59:05 | 138,018,499 | 65 | 44 | MIT | 2023-02-15T20:04:34 | 2018-06-20T10:41:13 | Jupyter Notebook | UTF-8 | Python | false | false | 679 | py | import json
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that serializes complex numbers as tagged dicts."""

    def default(self, obj):
        # Tag complex values so a matching object_hook can rebuild them.
        if isinstance(obj, complex):
            return {'_meta': '_complex', 'num': [obj.real, obj.imag]}
        # Defer everything else to the base encoder (which raises TypeError).
        return json.JSONEncoder.default(self, obj)
# Demo payload containing a complex number, encoded via ComplexEncoder.
data = {'an_int': 42, 'a_float': 3.14159265, 'a_complex': 3 + 4j}
json_data = json.dumps(data, cls=ComplexEncoder)
print(json_data)
def object_hook(obj):
    """json.loads hook: rebuild complex numbers tagged by ComplexEncoder.

    Dicts of the form {'_meta': '_complex', 'num': [re, im]} become
    complex values; every other object is returned unchanged.
    """
    try:
        if obj['_meta'] == '_complex':
            return complex(*obj['num'])
    except (KeyError, TypeError):
        # No '_meta' key (or not a mapping at all): not our tag.
        pass
    # BUG FIX: the original returned None (implicitly) for dicts whose
    # '_meta' exists but is not '_complex'; pass those through unchanged.
    return obj
# Decode back, rebuilding complex values via the object_hook.
data_out = json.loads(json_data, object_hook=object_hook)
print(data_out)
| [
"33118647+romydias@users.noreply.github.com"
] | 33118647+romydias@users.noreply.github.com |
717db42d79ee7b0d3916cafd2ff9cbc961178823 | 89284da682f723c6aaad8ef6bba37ac31cd30c92 | /PythonTutorial/Advance/multiple_threads_and_processes/code/processes/shared_memory_array.py | f85b49bae8270d4d704ef6ec7d53d8bb2a7f8623 | [] | no_license | Danielyan86/Python-Study | 9d9912e0385c5b4d2b7272e9eaca542ff556dc1a | 782c1638eb9733a4be4acbc4c805a78f0fe77546 | refs/heads/master | 2023-03-17T13:26:31.865927 | 2023-03-05T12:30:07 | 2023-03-05T12:30:07 | 26,902,349 | 28 | 25 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | from multiprocessing import Process, Array
def cube(x):
    """Increment every element of the mutable sequence *x* in place.

    NOTE(review): despite the name, this adds 1 to each slot -- it does
    not cube anything.  Works on lists and multiprocessing.Array alike.
    """
    for index in range(len(x)):
        x[index] += 1
if __name__ == "__main__":
# Array()初始化了一个拥有int数据类型、长度为3的空数组,该数组已被循环使用,在其中的每个元素上加1。
# 你可以在不同的进程中使用Arr,就像Value一样。这实质上是共享内存的概念。
# 注意:'d'表示双精度浮点数,'i'(在Array("i", 3)中)表示有符号整数。
arr = Array("i", 3)
print(arr[:]) # 初始化一个长度为3的数组,初始值为0
p = Process(target=cube, args=(arr,))
p.start()
p.join()
print(arr[:])
p = Process(target=cube, args=(arr,))
p.start()
p.join()
print(arr[:])
| [
"516495459@qq.com"
] | 516495459@qq.com |
05a0a743111a1fb2bcc0bc354304dab8a950f35a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/mmcv/mmcv/parallel/scatter_gather.py | af752be41e688283dae1aee634c26b49818ecea2 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,096 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
from torch import Tensor
from torch.nn.parallel._functions import Scatter as OrigScatter
from ._functions import Scatter
from .data_container import DataContainer
ScatterInputs = Union[Tensor, DataContainer, tuple, list, dict]
def scatter(inputs: ScatterInputs,
            target_gpus: List[int],
            dim: int = 0) -> list:
    """Scatter ``inputs`` to ``target_gpus``.

    Identical to the stock ``scatter`` except that it also understands
    :class:`~mmcv.parallel.DataContainer` objects.
    """

    def scatter_map(obj):
        # Tensors: stock CUDA scatter, or the self-implemented CPU
        # fallback when the target list is [-1] (CPU inference).
        if isinstance(obj, Tensor):
            if target_gpus == [-1]:
                return Scatter.forward(target_gpus, obj)
            return OrigScatter.apply(target_gpus, None, dim, obj)
        # DataContainers: CPU-only payloads pass through; others are
        # scattered like raw data.
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                return obj.data
            return Scatter.forward(target_gpus, obj.data)
        # Non-empty containers: scatter each element recursively, then
        # regroup per target device, preserving the container type.
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Anything else is replicated as-is onto every target.
        return [obj for _ in target_gpus]

    # scatter_map is recursive, so its closure cell keeps the function
    # (and everything it captured) alive in a reference cycle.  Rebinding
    # the name to None in `finally` breaks that cycle.
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None  # type: ignore
def scatter_kwargs(inputs: ScatterInputs,
                   kwargs: ScatterInputs,
                   target_gpus: List[int],
                   dim: int = 0) -> Tuple[tuple, tuple]:
    """Scatter with support for kwargs dictionary."""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter list with empty placeholders so both line up
    # one-to-one with the target devices.
    gap = len(scattered_inputs) - len(scattered_kwargs)
    if gap > 0:
        scattered_kwargs.extend({} for _ in range(gap))  # type: ignore
    elif gap < 0:
        scattered_inputs.extend(() for _ in range(-gap))  # type: ignore
    return tuple(scattered_inputs), tuple(scattered_kwargs)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
eaed96112b1511de5d974f5a8149f020a7934fb2 | 669a514211247356ed9be3ad07d138a5afd1f889 | /LaboratorioDePOOUSP/venv/bin/pip3 | 5abfae87242e35257414efc2b615b165aba99c40 | [] | no_license | borin98/Projetos_Em_Python_Curso_USP | 89a0761ad6cbe6c178358332fcd676cda2313649 | 95e78549e133fb644be95e37b4d4a003ca8da401 | refs/heads/master | 2021-04-12T09:54:04.672567 | 2018-05-04T18:57:02 | 2018-05-04T18:57:02 | 126,911,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | #!/home/borin/PycharmProjects/LaboratorioDePOOUSP/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools entry-point shim: strip the "-script.py(w)"/".exe" wrapper
    # suffix from argv[0], then dispatch to pip's console entry point and
    # exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')())
| [
"borinmacedo@gmail.com"
] | borinmacedo@gmail.com | |
78d6dff4ab665fc0023f1433308f4bfc7502b9a2 | 971c5ae1d87cdfbb97723485c3d76c17395b82b0 | /x86-semantics/semantics_using_uifs/z3EquivFormulas/z3tutorial.py | f4c0553048e4978a3d5e415bc18edc7b4bb2407f | [
"NCSA"
] | permissive | mewbak/binary-decompilation | 7d0bf64d6cd01bfa5f5fc912d74a85ce81124959 | f58da4c53cd823edc4bbbad6b647dbcefd7e64f8 | refs/heads/master | 2020-04-16T06:08:14.983946 | 2019-01-06T17:21:50 | 2019-01-06T17:21:50 | 165,334,058 | 1 | 0 | NOASSERTION | 2019-01-12T01:42:16 | 2019-01-12T01:42:16 | null | UTF-8 | Python | false | false | 7,103 | py | from z3 import *
# x = Int('x')
# y = Int('y')
# solve(x > 2, y < 10, x + 2*y == 7)
#
# x = Int('x')
# y = Int('y')
# print simplify(x + y + 2*x + 3)
# print simplify(x < y + x + 2)
# print simplify(And(x + 1 >= 3, x**2 + x**2 + y**2 + 2 >= 5))
#
# x = Int('x')
# y = Int('y')
# print x**2 + y**2 >= 1
# set_option(html_mode=False)
# print x**2 + y**2 >= 1
# x = Int('x')
# y = Int('y')
# n = x + y >= 3
# print "num args: ", n.num_args()
# print "children: ", n.children()
# print "1st child:", n.arg(0)
# print "2nd child:", n.arg(1)
# print "operator: ", n.decl()
# print "op name: ", n.decl().name()
# x = Real('x')
# solve(x > 4, x < 0)
# p = Bool('p')
# q = Bool('q')
# r = Bool('r')
# solve(Implies(p, q), r == Not(q), Or(Not(p), r))
# p = Bool('p')
# q = Bool('q')
# print And(p, q, True)
# print simplify(And(p, q, True))
# print simplify(And(p, q, False))
# p = Bool('p')
# x = Real('x')
# solve(Or(x < 5, x > 10), Or(p, x**2 == 2), Not(p))
# x = Int('x')
# y = Int('y')
#
# s = Solver()
# print s
#
# s.add(x > 10, y == x + 2)
# print s
# print "Solving constraints in the solver s ..."
# print s.check()
#
# print "Create a new scope..."
# s.push()
# s.add(y < 11)
# print s
# print "Solving updated set of constraints..."
# print s.check()
#
# print "Restoring state..."
# s.pop()
# print s
# print "Solving restored set of constraints..."
# print s.check()
# x = Real('x')
# y = Real('y')
# s = Solver()
# s.add(x > 1, y > 1, Or(x + y > 3, x - y < 2))
# print "asserted constraints..."
# for c in s.assertions():
# print c
#
# print s.check()
# print "statistics for the last check method..."
# print s.statistics()
# # Traversing statistics
# for k, v in s.statistics():
# print "%s : %s" % (k, v)
# x, y, z = Reals('x y z')
# s = Solver()
# s.add(x > 1, y > 1, x + y > 3, z - x < 10)
# print s.check()
#
# m = s.model()
# print "x = %s" % m[x]
#
# print "traversing model..."
# for d in m.decls():
# print "%s = %s" % (d.name(), m[d])
# x = BitVec('x', 16)
# y = BitVec('y', 16)
# print x + 2
# # Internal representation
# print (x + 2).sexpr()
#
# # -1 is equal to 65535 for 16-bit integers
# print simplify(x + y - 1)
#
# # Creating bit-vector constants
# a = BitVecVal(-1, 16)
# b = BitVecVal(65535, 16)
# print simplify(a == b)
#
#
# # Create to bit-vectors of size 32
# x, y = BitVecs('x y', 32)
#
# solve(x + y == 2, x > 0, y > 0)
#
# solve(x & y == ~y)
#
# solve(x < 0)
#
# # using unsigned version of <
# solve(ULT(x, 0))
#
# # Create to bit-vectors of size 32
# x, y = BitVecs('x y', 32)
#
# solve(x >> 2 == 3)
#
# solve(x << 2 == 3)
#
# solve(x << 2 == 24)
# p, q = Bools('p q')
# demorgan = And(p, q) == Not(Or(Not(p), Not(q)))
# print demorgan
#
#
# print "Proving demorgan..."
# prove(demorgan)
#
# x = BitVec('t', 8)
# print x.sexpr()
# y = Extract(7, 4, x)
# z = Extract(3, 0, x)
# w = Concat(y, z)
# print y.sexpr()
# print z.sexpr()
# print w.sexpr()
# s = Solver();
# s.add(Not(w == x))
# print s.check()
# x = Int('x')
# y = Real('y')
# print (x + 1).sort()
# print (y + 1).sort()
# print (x >= 2).sort()
# x = Int('x')
# print "is expression: ", is_expr(x)
# n = x + 1
# print "is application:", is_app(n)
# print "decl: ", n.decl()
# print "num args: ", n.num_args()
# for i in range(n.num_args()):
# print "arg(", i, ") ->", n.arg(i)
# x = Int('x')
# x_d = x.decl()
# print "is_expr(x_d): ", is_expr(x_d)
# print "is_func_decl(x_d):", is_func_decl(x_d)
# print "x_d.name(): ", x_d.name()
# print "x_d.range(): ", x_d.range()
# print "x_d.arity(): ", x_d.arity()
# # x_d() creates an application with 0 arguments using x_d.
# print "eq(x_d(), x): ", eq(x_d(), x)
# print "\n"
# # f is a function from (Int, Real) to Bool
# f = Function('f', IntSort(), RealSort(), BoolSort())
# print "f.name(): ", f.name()
# print "f.range(): ", f.range()
# print "f.arity(): ", f.arity()
# for i in range(f.arity()):
# print "domain(", i, "): ", f.domain(i)
# # f(x, x) creates an application with 2 arguments using f.
# print f(x, x)
# print eq(f(x, x).decl(), f)
# x = Int('x')
# y = Int('y')
# max = If(x > y, (x, y))
# print simplify(max)
# a = BitVecVal(-1, 16)
# b = BitVecVal(65535, 16)
# print simplify(a + b)
# print a.size()
# x = Real("x")
#
# rule = x > 0
# goal = x < 0
#z3.prove(z3.Implies(rule, goal))
# z3.prove(Implies(And(rule, x != -1, x != -2), goal))
# p, q = Bools('p q')
# print simplify(Implies(p, q))
# print simplify(And(Implies(p, q), Implies(q,p)))
# print simplify(p == q)
#print parse_smt2_string('(declare-const x Int) (assert (> x 0)) (assert (< x 10))')
def prove(f):
    """Report whether formula *f* is valid by refuting its negation.

    NOTE: shadows z3's own prove() helper; kept for the tutorial.
    """
    s = Solver()
    s.add(Not(f))
    if s.check() == unsat:
        print("proved")
    else:
        print("failed to prove")
# a = BitVec('x', 4)
# b = BitVec('y', 4)
# z = Not(ULE(a, b) == (a <= b))
# prove(z)
# z3.prove(z)
#a = parse_smt2_string('(declare-const a (_ BitVec 4)) (declare-const b (_ BitVec 4)) (assert (not (= (bvule a b) (bvsle a b)))) ')
#print a
#print z3.prove(a)
#a = BitVecVal(0xff, 16)
#print a.sexpr()
# R1 = BitVec('R1', 64)
# R2 = BitVec('R2', 64)
# R3 = BitVec('R3', 64)
# CONST_BV_S8_V63 = BitVecVal(63, 8)
# CONST_BV_S8_V64 = BitVecVal(64, 8)
# CONST_BV_S1_V0 = BitVecVal(0, 1)
# CONST_BV_S64_V0 = BitVecVal(0, 64)
# CONST_BV_S64_VNEG1 = BitVecVal(-1, 64)
# CONST_BV_S57_V0 = BitVecVal(0x0, 57)
# CONST_BV_S8_V3f = BitVecVal(0x3f, 8)
#
# a = BitVecVal( BV2Int( CONST_BV_S8_V63 , is_signed=False), 64)
# Declarations
# CF = BitVec('CF', 1)
# PF = BitVec('PF', 1)
# AF = BitVec('AF', 1)
# ZF = BitVec('ZF', 1)
# SF = BitVec('SF', 1)
# OF = BitVec('OF', 1)
#
# RAX = BitVec('RAX', 64)
# RCX = BitVec('RCX', 64)
# ZERO1 = BitVecVal(0, 1)
# ONE1 = BitVecVal(1, 1)
#
# cf = (CF == ONE1)
# pf = (PF == ONE1)
# af = (AF == ONE1)
# zf = (ZF == ONE1)
# sf = (SF == ONE1)
# of = (OF == ONE1)
#
# cvt_int32_to_single = Function('f', IntSort(), Float32())
# XX = Function('t', BitVecSort(32), IntSort())
#
# print('[6;30;44m' + 'Opcode:vcvtdq2ps_xmm_xmm' + '[0m')
#
# R1 = BitVec('R1', 256)
# R2 = BitVec('R2', 256)
# CONST_BV_S128_V0 = BitVecVal(0, 128)
#
# #PK_R2 = Concat(CONST_BV_S128_V0, fpToIEEEBV (cvt_int32_to_single ( Extract (127, 96, R1))))
# #PS_R2 = Concat(CONST_BV_S128_V0, fpToIEEEBV (cvt_int32_to_single ( Extract (127, 96, R1))))
#
# PK_R2 = fpToIEEEBV (cvt_int32_to_single ( XX(Extract (127, 96, R1))))
# PS_R2 = fpToIEEEBV (cvt_int32_to_single ( XX(Extract (127, 96, R1))))
#
# print simplify(PK_R2)
# print simplify(PS_R2)
#
# print eq(PK_R2, PS_R2)
# print prove(PK_R2 == PS_R2)
######### Test <<
# X = BitVecVal(2, 8)
# ONE = BitVecVal(1, 1)
#
# Y = X << BitVecVal(Int2BV(1, 8), X.size())
# print Y
# print simplify(Y)
# Symbols used by the uninterpreted-function examples below.
CONST_BV_S1_V1 = BitVecVal(0x1, 1)
b = Int('b')
Y2 = Function('Y', IntSort(), IntSort(), IntSort())
a = Int('a')


def X(a):
    """Identity on its argument."""
    return a


# NOTE(review): `z3.prove` needs `import z3`; only `from z3 import *` is
# visible in this file -- confirm the module name is actually bound.
z3.prove(X(a) == a)


def Y1(a, b):
    """Apply the uninterpreted function Y2 with swapped arguments."""
    return Y2(b, a)


z3.prove(Y1(a, b) == Y2(b, a))


def X(a):  # re-binds X, shadowing the identity above
    """Return 1 when a == 3, else 2 (truth-tests a z3 expression)."""
    if a == 3:
        return 1
    return 2


print(X(a) == If(a == 3, 1, 2))
| [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
02caebbc080991e786b864bdcff5e6559bee9ffb | 930309163b930559929323647b8d82238724f392 | /arc025_2.py | 31eb79bf696c226f9cf49a3df1266da20ed55b4d | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py |
class Cumsum2d:
    """2-D prefix sums over an h x w grid, kept as class-level state."""

    dp: "Optional[List[List]]" = None

    @staticmethod
    def generate(h, w, a):
        """Build dp so that dp[i][j] = sum of a over rectangle (0,0)..(i,j)."""
        table = [[0] * w for _ in range(h)]
        table[0][0] = a[0][0]
        for col in range(1, w):
            table[0][col] = table[0][col - 1] + a[0][col]
        for row in range(1, h):
            running = 0  # running sum of the current row
            for col in range(w):
                running += a[row][col]
                table[row][col] = table[row - 1][col] + running
        Cumsum2d.dp = table
        return table

    @staticmethod
    def calc(p, q, x, y):
        """Sum of a over rectangle (p,q)..(x,y); 0 when the range is empty."""
        table = Cumsum2d.dp
        if p > x or q > y:
            return 0
        # Inclusion-exclusion on the prefix-sum table.
        total = table[x][y]
        if p > 0:
            total -= table[p - 1][y]
        if q > 0:
            total -= table[x][q - 1]
        if p > 0 and q > 0:
            total += table[p - 1][q - 1]
        return total
import itertools
H, W = map(int, input().split())
C = [list(map(int, input().split())) for _ in range(H)]
for h, w in itertools.product(range(H), range(W)):
    if (h + w) % 2 == 0:  # black-chocolate cell:
        C[h][w] *= -1     # flip the sign so balanced rectangles sum to zero
Cumsum2d.generate(H, W, C)
ans = 0
# Try every rectangle; a zero sum means equal black/white totals.
for p, q in itertools.product(range(H), range(W)):
    for x, y in itertools.product(range(p, H), range(q, W)):
        if Cumsum2d.calc(p, q, x, y) == 0:
            ans = max(ans, (x - p + 1) * (y - q + 1))
print(ans)
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
3f06d0b16de54e75a4262e8811413a8a3debcf2e | 86d33ca948fefd0e4dcea71fc8a53c6b062eca8b | /safe_relay_service/gas_station/models.py | 8aae69493abc13847eb7c3fcf076ef6b1184787d | [
"MIT"
] | permissive | energywebfoundation/safe-relay-service | f140f39375c1e916810e94b7289355cb6f1b00a5 | 7a34b2492a78027aa1131360c445d2de3ec1aaee | refs/heads/master | 2020-09-26T18:24:57.727244 | 2019-12-16T18:19:25 | 2019-12-16T18:19:25 | 226,312,414 | 0 | 0 | MIT | 2019-12-06T11:15:00 | 2019-12-06T11:14:59 | null | UTF-8 | Python | false | false | 920 | py | from django.db import models
from model_utils.models import TimeStampedModel
class GasPrice(TimeStampedModel):
    """Snapshot of gas-price tiers recorded at a point in time."""
    lowest = models.BigIntegerField()
    safe_low = models.BigIntegerField()
    standard = models.BigIntegerField()
    fast = models.BigIntegerField()
    fastest = models.BigIntegerField()

    class Meta:
        # `created` comes from TimeStampedModel; newest snapshot is .latest()
        get_latest_by = 'created'

    def __str__(self):
        return '%s lowest=%d safe_low=%d standard=%d fast=%d fastest=%d' % (
            self.created, self.lowest, self.safe_low,
            self.standard, self.fast, self.fastest)
| [
"uxio@gnosis.pm"
] | uxio@gnosis.pm |
951f93f3ad78397dfe5c0b6c8d65393db383a88c | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/apps/scratch/alex/test/prototyped_from.py | c409e5d9084597782be40c6598ca405228c3cce5 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 1,311 | py | '''
Created on May 3, 2010
@author: alexander
'''
from enthought.traits.api import \
PrototypedFrom, Float, HasTraits, Instance, Str, DelegatesTo
class Parent(HasTraits):
    """A person whose last_name change fires a Traits notification."""
    first_name = Str
    last_name = Str

    def _last_name_changed(self, new):
        # Traits static notification handler, fired on assignment.
        print("Parent's last name changed to %s." % new)
class Child(HasTraits):
    """Child whose last_name prototypes from its father until overridden."""
    father = Instance(Parent)
    first_name = Str
    last_name = PrototypedFrom('father')

    def _last_name_changed(self, new):
        # Traits static notification handler, fired on assignment.
        print("Child's last name changed to %s." % new)
dad = Parent( first_name='William', last_name='Chase' )
son = Child( first_name='John', father=dad )
# son has no own last_name yet, so reads are prototyped from dad.
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
dad.last_name='Jones'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
# Assigning on son gives him his own value, detaching him from dad.
son.last_name='Thomas'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
dad.last_name='Riley'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
# Deleting son's own value restores prototyping from dad.
del son.last_name
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
dad.last_name='Simmons'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
8050bb2685427b930d2b9bf195e420e980ddfa6a | 3e77a86429ba0f6968f709e77e204cdfe920a041 | /python/python/src/python_problems/other_python_prob/split_ex.py | 197395da79cb77288a4fcac60b91f4ba222cd5c8 | [] | no_license | ramyamango123/test | a2d9bb6cafe8d7406b76eba526ddded2acf2a3b2 | 85420c9406109a72e1b1d455ea29a5cae9def5a3 | refs/heads/master | 2020-06-07T23:34:08.344051 | 2014-03-12T05:19:05 | 2014-03-12T05:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | import re
def extract_price(text):
    """Return the price digits (leading '$' stripped) from *text*.

    Strings such as "$2.99 after rebate" yield "2.99"; a bare "$2.99"
    yields "2.99".
    """
    # BUG FIX: the original tested `if i in t1:` with `i` never defined,
    # which raised NameError.  The commented-out regexpi("After rebate")
    # shows the intent was to detect the "after rebate" suffix.
    if "rebate" in text.lower():
        return text.split(" ")[0][1:]
    return text[1:]


t1 = "$2.99 after rebate"
print(extract_price(t1))
"ramya@Ramyas-MacBook-Air.local"
] | ramya@Ramyas-MacBook-Air.local |
123cdcb3e7724202187438dd63b63dd01e104218 | 96ced1bf722a6b003a9388f2b1b82e7e2870cb5f | /platforma_inwestorow/scraper/urls.py | e265c4f93e0b2177c42b2e149197302cb15087cb | [] | no_license | topi-chan/scraper_app | bfb069b199e04249257c0968bb5ff3c274d3d0d3 | 4fe97e3ab5480f9365aa6373ca6338b34d221ca7 | refs/heads/master | 2023-02-24T07:19:30.116068 | 2021-01-28T00:36:41 | 2021-01-28T00:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('mailing_assign', views.mailing_assign, name='mailing_assign')
]
| [
"maciej@top-project.com.pl"
] | maciej@top-project.com.pl |
d09b084170f96a5415905a1133d4562ed51f5774 | 677388ca1fc9b489e94598d392d1b584efa0c096 | /unpacking variables.py | ff9f5965f11eb9db6310847d13359f90a83ba125 | [] | no_license | robertruhiu/learn | 356b10a92536e8feca137e0ef15fdbac588785a7 | 6f32418144111ce52c23b79314c1cf2c59ee03b8 | refs/heads/master | 2021-01-23T16:05:38.468043 | 2017-09-14T10:37:29 | 2017-09-14T10:37:29 | 102,722,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from sys import argv
script,first,second = argv
print "The script is called:",script
print"Your first variable is:",first
print"Your second variable is:",second
| [
"robertruhiu@gmail.com"
] | robertruhiu@gmail.com |
687978f653d19ee7a31533d6c0bb62eef063429e | c67603fed9ef7a1ebf5a41944e5f65d37e9ddfb3 | /lovcenbanka/spiders/spider.py | 76a1bbbbdccd8780f43fc061a4156a24c2c3f20b | [] | no_license | hristo-grudev/lovcenbanka | ff20c25f67ff9dfa89c8440cc79f5a741c317def | 1a80c2b5d3fff7a3bee869ad637f0ef5fe872878 | refs/heads/main | 2023-03-13T10:34:13.024062 | 2021-02-24T12:04:57 | 2021-02-24T12:04:57 | 341,887,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import LovcenbankaItem
from itemloaders.processors import TakeFirst
class LovcenbankaSpider(scrapy.Spider):
    """Crawl news posts from lovcenbanka.me."""
    name = 'lovcenbanka'
    start_urls = ['https://lovcenbanka.me/me/novosti']

    def parse(self, response):
        # Follow every article link on the listing page.
        for link in response.xpath('//h2[@class="ba-blog-post-title"]/a/@href').getall():
            yield response.follow(link, self.parse_post)
        # Keep walking through the pagination links.
        for page in response.xpath('//div[@class="ba-blog-posts-pagination"]//a/@href').getall():
            yield response.follow(page, self.parse)

    def parse_post(self, response):
        # Collapse all visible text fragments of the article body into
        # one whitespace-normalised string.
        fragments = [
            part.strip()
            for part in response.xpath('//div[@class="blog-content-wrapper"]//text()[normalize-space()]').getall()
        ]
        loader = ItemLoader(item=LovcenbankaItem(), response=response)
        loader.default_output_processor = TakeFirst()
        loader.add_value('title', response.xpath('//h1/text()').get())
        loader.add_value('description', ' '.join(fragments).strip())
        loader.add_value('date', response.xpath('//span[@class="intro-post-date"]/text()').get())
        return loader.load_item()
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
0a37618d4074892270ed144a3b76d507146dfb1f | 32dda10669e459cf37c31f426fa709001d2c75b0 | /atcoder/contest/solved/abc155_b.py | 897a7f9a1fa3b72ca3f84dc1f5154053ba979bcd | [] | no_license | fastso/learning-python | 3300f50d06871245d0bfcbe9d201224580f70852 | d21dbd1b9f31017cdb1ed9b9ffd1e53ffe326572 | refs/heads/master | 2023-02-10T14:43:53.726247 | 2023-01-26T10:14:59 | 2023-01-26T10:14:59 | 193,454,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | n = int(input())
a = list(map(int, input().split()))
# Approved iff every even number is divisible by 3 or by 5 (ABC155 B).
if all(v % 3 == 0 or v % 5 == 0 for v in a if v % 2 == 0):
    print('APPROVED')
else:
    print('DENIED')
| [
"fastso.biko@gmail.com"
] | fastso.biko@gmail.com |
702daecec153b661b5fca34283aeb67e8b4888ca | 25df9eca90070191be927a35e3285343dd3bfe9b | /main.py | 146f8a307569a9c075633d03b40e6d27ff5a5b29 | [] | no_license | minhnd3796/simple_pyobject_tutorial | 2639bea433212ca4470a5464210ebfce3727ccec | 340b797e7b06ea308652be60aff430ab0811fe28 | refs/heads/master | 2021-05-02T02:22:03.726342 | 2018-02-09T09:13:43 | 2018-02-09T09:13:43 | 120,881,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import Id
# Read the identity fields exposed by the Id module and echo them.
name = Id.get_name()
addr = Id.get_addr()
dob = Id.get_dob()
print("name:", name)
print("addr:", addr)
print("dob:", dob) | [
"gordonnguyen3796@gmail.com"
] | gordonnguyen3796@gmail.com |
5c24e27f03722c90cd20e99b743b2fc90abc4ab9 | c98e9ebdb356360c6dbbfd8fcf5a809fc7c0b975 | /rockypages/admin.py | e2d08ee2907733c629af2b5e61b5db56ccc95387 | [] | no_license | Cerkinfo/homepage | c89f9f7154f2dc6c43109fee500d8a060f16b5a5 | 4b59b48af7e1e4e5b168883ff3f90cc0a9d3e481 | refs/heads/master | 2020-05-29T19:28:21.201760 | 2014-04-17T12:30:04 | 2014-04-17T12:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
from ckeditor.widgets import CKEditorWidget
from django.contrib.flatpages.admin import FlatpageForm
from reversion.admin import VersionAdmin
from flatblocks.models import FlatBlock
class FlatPageForm(forms.ModelForm):
    """Flatpage admin form with a validated URL and a CKEditor content field."""
    url = forms.RegexField(
        label=_("URL"),
        max_length=100,
        regex=r'^[-\w/]+$',
        help_text=_("Example: '/about/contact/'. Make sure to have leading"
                    " and trailing slashes."),
        error_message=_("This value must contain only letters, numbers,"
                        " underscores, dashes or slashes."))
    content = forms.CharField(widget=CKEditorWidget(), label=_("Content"))

    class Meta:
        model = FlatPage
class FlatPageAdmin(VersionAdmin):
    """Version-tracked admin for FlatPage using the CKEditor form."""
    form = FlatPageForm
    fieldsets = (
        (None, {'fields': ('url', 'title', 'content', 'sites')}),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('enable_comments', 'registration_required',
                       'template_name'),
        }),
    )
    list_display = ('url', 'title')
    list_filter = ('sites', 'enable_comments', 'registration_required')
    search_fields = ('url', 'title')
class FlatBlockAdmin(VersionAdmin):
    """Version-tracked admin for FlatBlock (stock behaviour otherwise)."""
    pass
# Replace the stock admin registrations with the version-tracked variants.
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
admin.site.unregister(FlatBlock)
admin.site.register(FlatBlock, FlatBlockAdmin)
| [
"nikita.marchant@gmail.com"
] | nikita.marchant@gmail.com |
b11ff45c86d0a9b296aa011ae26fc47f43f32ac2 | b958551bde844d2bfaa4dd7cb670c4ebab4ff17b | /week4/365_day_advanced.py | f0e79a956030ae39dc60eebefc7cb303568806a0 | [] | no_license | aslupin/cpe31-task | d97d3e495634826d6752c1ee82192cc58045a883 | c15ec9be94b899120db39b076a4d1c644f0af24b | refs/heads/master | 2021-01-15T09:18:54.615578 | 2017-11-17T12:53:23 | 2017-11-17T12:53:23 | 99,573,358 | 0 | 2 | null | 2017-09-20T06:25:31 | 2017-08-07T12:00:05 | Python | UTF-8 | Python | false | false | 782 | py | import math
movement = {"U":1,"R":1,"L":-1,"D":-1} # Const of movement
x = [0];y = [0] # list container for pattern
lastx=0;lasty=0;tmpx=0;tmpy=0 # declare var
check = False
fly = input()
here_x = int(input())
here_y = int(input())
for i in fly: # get each of pattern
if(i == 'U' or i == 'D'):tmpy += movement[i]
if(i == 'R' or i == 'L'):tmpx += movement[i]
if(not(tmpx in x and tmpy in y)):
x.append(tmpx)
y.append(tmpy)
if(here_x <= 0 and here_y <=0):bignum = math.fabs(min(here_x,here_y))
else:bignum = max(here_x,here_y) # big-number
for i in range(len(x)):
lastx = x[i] * bignum ;lasty = y[i] * bignum
for j in range(len(x)):
if(lastx + x[j] == here_x and lasty + y[j] == here_y):check = True
if(check):print('Y')
else:print('N') | [
"poon_arsene_lupin@hotmail.com"
] | poon_arsene_lupin@hotmail.com |
ce38fb44a64fcfae8083fd6ce3fe008b981d12e2 | 5de718a2ab00460f59621e1e3c100b37c0853f61 | /env/Lib/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py | 426f3e3ef410c599f02a37d452f04c4dfe17d460 | [] | no_license | HenryVo31/Connect | 3fd60d893edd1199663878b7b68505e57a410dd6 | 3783e5b4d6b58f19e37ccff66501cb78c35c1500 | refs/heads/master | 2023-02-13T14:21:12.692446 | 2021-01-08T21:40:16 | 2021-01-08T21:40:16 | 295,485,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,382 | py | # mssql/zxjdbc.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
from .base import MSDialect
from .base import MSExecutionContext
from ... import engine
from ...connectors.zxJDBC import ZxJDBCConnector
class MSExecutionContext_zxjdbc(MSExecutionContext):
    # True once "; SELECT scope_identity()" has been appended to the current
    # statement, so post_exec() knows it must fish the new id out of the
    # trailing result set.
    _embedded_scope_identity = False

    def pre_exec(self):
        """Embed a scope_identity() SELECT into INSERTs that need lastrowid."""
        super(MSExecutionContext_zxjdbc, self).pre_exec()
        # scope_identity after the fact returns null in jTDS so we must
        # embed it
        if self._select_lastrowid and self.dialect.use_scope_identity:
            self._embedded_scope_identity = True
            self.statement += "; SELECT scope_identity()"

    def post_exec(self):
        """Recover the inserted id, buffer RETURNING rows, and switch
        IDENTITY_INSERT back off when it was enabled for this execution."""
        if self._embedded_scope_identity:
            # Advance through result sets until the scope_identity() row can
            # be fetched; jTDS raises a DB-API Error until the right set is
            # current, so keep calling nextset() on failure.
            while True:
                try:
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error:
                    self.cursor.nextset()
            self._lastrowid = int(row[0])
        # DML with a RETURNING clause: fully buffer the results now.
        if (
            self.isinsert or self.isupdate or self.isdelete
        ) and self.compiled.returning:
            self._result_proxy = engine.FullyBufferedResultProxy(self)
        if self._enable_identity_insert:
            table = self.dialect.identifier_preparer.format_table(
                self.compiled.statement.table
            )
            self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
    """MSSQL dialect for Jython's zxJDBC DB-API, backed by the jTDS driver."""

    jdbc_db_name = "jtds:sqlserver"
    jdbc_driver_name = "net.sourceforge.jtds.jdbc.Driver"
    execution_ctx_cls = MSExecutionContext_zxjdbc

    def _get_server_version_info(self, connection):
        # The JDBC connection reports its version as a dotted string;
        # expose it as a tuple of ints, e.g. "10.2.1" -> (10, 2, 1).
        version_string = connection.connection.dbversion
        return tuple(int(part) for part in version_string.split("."))


dialect = MSDialect_zxjdbc
| [
"trungsonvo2001@gmail.com"
] | trungsonvo2001@gmail.com |
714e7c3f7f9102593f2a2bbf1a7f7c6aad0e2d64 | 2cbe01c30ab6cb77973bc2b19b2e573481205ef2 | /poetry/console/commands/debug/resolve.py | ad90f4fac2026268f57ed5de72e8801e4e9b7124 | [
"GPL-3.0-or-later",
"LGPL-3.0-or-later",
"LGPL-2.1-only",
"LGPL-3.0-only",
"BSD-4-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-3.0-only",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | markovendelin/poetry | 9dc93d9db78b67eae71501761fdaf4db66d76a90 | de73fa07386be26d32bf15044fd81bf979787b9f | refs/heads/master | 2020-03-21T11:30:00.800918 | 2018-06-18T22:58:03 | 2018-06-18T22:58:03 | 138,509,629 | 0 | 0 | MIT | 2018-06-24T19:28:08 | 2018-06-24T19:28:08 | null | UTF-8 | Python | false | false | 3,628 | py | import re
from typing import List
from ..command import Command
class DebugResolveCommand(Command):
    """
    Debugs dependency resolution.
    debug:resolve
    { package?* : packages to resolve. }
    { --E|extras=* : Extras to activate for the dependency. }
    { --python= : Python version(s) to use for resolution. }
    """

    # Repository loggers surfaced while the solver runs.
    _loggers = ["poetry.repositories.pypi_repository"]

    def handle(self):
        """Resolve either the current project's dependencies or an ad-hoc set
        of packages from the command line, then print the chosen versions."""
        from poetry.packages import Dependency
        from poetry.packages import ProjectPackage
        from poetry.puzzle import Solver
        from poetry.repositories.repository import Repository
        from poetry.semver import parse_constraint

        packages = self.argument("package")
        if not packages:
            # No explicit packages: resolve the project package as-is.
            package = self.poetry.package
        else:
            requirements = self._determine_requirements(packages)
            requirements = self._format_requirements(requirements)
            # validate requirements format
            for constraint in requirements.values():
                parse_constraint(constraint)
            dependencies = []
            for name, constraint in requirements.items():
                dep = Dependency(name, constraint)
                # --extras may be given several times and/or space-separated.
                extras = []
                for extra in self.option("extras"):
                    if " " in extra:
                        extras += [e.strip() for e in extra.split(" ")]
                    else:
                        extras.append(extra)
                for ex in extras:
                    dep.extras.append(ex)
                dependencies.append(dep)
            # Build a throwaway project package that carries only the
            # requested dependencies (and an optional --python override).
            package = ProjectPackage(
                self.poetry.package.name, self.poetry.package.version
            )
            package.python_versions = (
                self.option("python") or self.poetry.package.python_versions
            )
            for dep in dependencies:
                package.requires.append(dep)
        solver = Solver(
            package, self.poetry.pool, Repository(), Repository(), self.output
        )
        ops = solver.solve()
        self.line("")
        self.line("Resolution results:")
        self.line("")
        for op in ops:
            package = op.package
            self.line(
                " - <info>{}</info> (<comment>{}</comment>)".format(
                    package.name, package.version
                )
            )

    def _determine_requirements(self, requires):  # type: (List[str]) -> List[str]
        """Normalize raw CLI requirement strings into "name constraint"
        strings, defaulting the constraint to "*" when none was given."""
        if not requires:
            return []
        requires = self._parse_name_version_pairs(requires)
        result = []
        for requirement in requires:
            if "version" not in requirement:
                requirement["version"] = "*"
            result.append("{} {}".format(requirement["name"], requirement["version"]))
        return result

    def _parse_name_version_pairs(self, pairs):  # type: (list) -> list
        """Split each "name=constraint" / "name:constraint" / "name constraint"
        pair into a {"name": ..., "version": ...} dict; "version" is omitted
        when no constraint was supplied."""
        result = []
        for i in range(len(pairs)):
            # Collapse the first '=', ':' or ' ' separator to a single space.
            pair = re.sub("^([^=: ]+)[=: ](.*)$", "\\1 \\2", pairs[i].strip())
            pair = pair.strip()
            if " " in pair:
                # BUGFIX: split on the FIRST space only. The previous
                # maxsplit=2 could yield three fields (e.g. "foo 1.0 extra")
                # and crash the two-name unpacking with a ValueError.
                name, version = pair.split(" ", 1)
                result.append({"name": name, "version": version})
            else:
                result.append({"name": pair})
        return result

    def _format_requirements(self, requirements):  # type: (List[str]) -> dict
        """Map requirement names to constraints. Every entry is expected to
        carry a "version" key (handle() feeds in _determine_requirements
        output, which always includes one)."""
        requires = {}
        requirements = self._parse_name_version_pairs(requirements)
        for requirement in requirements:
            requires[requirement["name"]] = requirement["version"]
        return requires
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
5b1119992f2bd6a9b3e95510d7c56c29898df158 | f4dd8aa4e5476ffde24e27273dd47913c7f9177a | /Dlv2_safe2/tests/parser/pasi-brew-eite-99-example-buy-car.test.py | 4b81a4af09c099eeba6884ec3e5b68744983223a | [
"Apache-2.0"
] | permissive | dave90/Dlv_safe2 | e56071ec1b07c45defda571cb721852e2391abfb | f127f413e3f35d599554e64aaa918bc1629985bc | refs/heads/master | 2020-05-30T10:44:13.473537 | 2015-07-12T12:35:22 | 2015-07-12T12:35:22 | 38,256,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | input = """
% Facts which are not disputable are declared as heads of 'top'
rule(top).
rule(r11).
rule(r12).
rule(r13).
rule(r21).
rule(r22).
rule(r23).
rule(r24).
rule(r25).
rule(r26).
rule(r27).
rule(r28).
rule(r29).
rule(r31).
rule(r32).
rule(r33).
rule(r34).
rule(r35).
rule(r36).
rule(r37).
rule(r38).
rule(r39).
rule(r41).
rule(r42).
rule(r43).
rule(r44).
rule(r45).
rule(r46).
rule(r47).
rule(r48).
rule(r49).
head(expensive_C,top).
head(safe_C,top).
head(safe_V,top).
head(nice_P,top).
head(fast_P,top).
head(neg_buy_C,r11). nbl(buy_C,r11). pbl(expensive_C,r11).
head(neg_buy_V,r12). nbl(buy_V,r12). pbl(expensive_V,r12).
head(neg_buy_P,r13). nbl(buy_P,r13). pbl(expensive_P,r13).
head(buy_C,r21). nbl(neg_buy_C,r21). pbl(safe_C,r21).
head(buy_V,r22). nbl(neg_buy_V,r22). pbl(safe_V,r22).
head(buy_P,r23). nbl(neg_buy_P,r23). pbl(safe_P,r23).
head(neg_buy_C,r24). pbl(buy_V,r24). pbl(safe_V,r24).
head(neg_buy_C,r27). pbl(buy_P,r27). pbl(safe_P,r27).
head(neg_buy_V,r25). pbl(buy_P,r25). pbl(safe_P,r25).
head(neg_buy_V,r28). pbl(buy_C,r28). pbl(safe_C,r28).
head(neg_buy_P,r26). pbl(buy_C,r26). pbl(safe_C,r26).
head(neg_buy_P,r29). pbl(buy_V,r29). pbl(safe_V,r29).
head(buy_C,r31). nbl(neg_buy_C,r31). pbl(nice_C,r31).
head(buy_V,r32). nbl(neg_buy_V,r32). pbl(nice_V,r32).
head(buy_P,r33). nbl(neg_buy_P,r33). pbl(nice_P,r33).
head(neg_buy_C,r34). pbl(buy_V,r34). pbl(nice_V,r34).
head(neg_buy_C,r37). pbl(buy_P,r37). pbl(nice_P,r37).
head(neg_buy_V,r35). pbl(buy_P,r35). pbl(nice_P,r35).
head(neg_buy_V,r38). pbl(buy_C,r38). pbl(nice_C,r38).
head(neg_buy_P,r36). pbl(buy_C,r36). pbl(nice_C,r36).
head(neg_buy_P,r39). pbl(buy_V,r39). pbl(nice_V,r39).
head(buy_C,r41). nbl(neg_buy_C,r41). pbl(fast_C,r41).
head(buy_V,r42). nbl(neg_buy_V,r42). pbl(fast_V,r42).
head(buy_P,r43). nbl(neg_buy_P,r43). pbl(fast_P,r43).
head(neg_buy_C,r44). pbl(buy_V,r44). pbl(fast_V,r44).
head(neg_buy_C,r47). pbl(buy_P,r47). pbl(fast_P,r47).
head(neg_buy_P,r46). pbl(buy_C,r46). pbl(fast_C,r46).
head(neg_buy_P,r49). pbl(buy_V,r49). pbl(fast_V,r49).
head(neg_buy_V,r45). pbl(buy_P,r45). pbl(fast_P,r45).
head(neg_buy_V,r48). pbl(buy_C,r48). pbl(fast_C,r48).
opp(buy_C,neg_buy_C).
opp(buy_V,neg_buy_V).
opp(buy_P,neg_buy_P).
% define preferences
level(0,top).
level(1,r11).
level(1,r12).
level(1,r13).
level(2,r21).
level(2,r22).
level(2,r23).
level(2,r24).
level(2,r25).
level(2,r26).
level(2,r27).
level(2,r28).
level(2,r29).
level(3,r31).
level(3,r32).
level(3,r33).
level(3,r34).
level(3,r35).
level(3,r36).
level(3,r37).
level(3,r38).
level(3,r39).
level(4,r41).
level(4,r42).
level(4,r43).
level(4,r44).
level(4,r45).
level(4,r46).
level(4,r47).
level(4,r48).
level(4,r49).
kl(0,1). kl(1,2). kl(2,3). kl(3,4).
pr(X,Y) :- kl(L1,L2), level(L1,X),level(L2,Y).
"""
output = """
% Facts which are not disputable are declared as heads of 'top'
rule(top).
rule(r11).
rule(r12).
rule(r13).
rule(r21).
rule(r22).
rule(r23).
rule(r24).
rule(r25).
rule(r26).
rule(r27).
rule(r28).
rule(r29).
rule(r31).
rule(r32).
rule(r33).
rule(r34).
rule(r35).
rule(r36).
rule(r37).
rule(r38).
rule(r39).
rule(r41).
rule(r42).
rule(r43).
rule(r44).
rule(r45).
rule(r46).
rule(r47).
rule(r48).
rule(r49).
head(expensive_C,top).
head(safe_C,top).
head(safe_V,top).
head(nice_P,top).
head(fast_P,top).
head(neg_buy_C,r11). nbl(buy_C,r11). pbl(expensive_C,r11).
head(neg_buy_V,r12). nbl(buy_V,r12). pbl(expensive_V,r12).
head(neg_buy_P,r13). nbl(buy_P,r13). pbl(expensive_P,r13).
head(buy_C,r21). nbl(neg_buy_C,r21). pbl(safe_C,r21).
head(buy_V,r22). nbl(neg_buy_V,r22). pbl(safe_V,r22).
head(buy_P,r23). nbl(neg_buy_P,r23). pbl(safe_P,r23).
head(neg_buy_C,r24). pbl(buy_V,r24). pbl(safe_V,r24).
head(neg_buy_C,r27). pbl(buy_P,r27). pbl(safe_P,r27).
head(neg_buy_V,r25). pbl(buy_P,r25). pbl(safe_P,r25).
head(neg_buy_V,r28). pbl(buy_C,r28). pbl(safe_C,r28).
head(neg_buy_P,r26). pbl(buy_C,r26). pbl(safe_C,r26).
head(neg_buy_P,r29). pbl(buy_V,r29). pbl(safe_V,r29).
head(buy_C,r31). nbl(neg_buy_C,r31). pbl(nice_C,r31).
head(buy_V,r32). nbl(neg_buy_V,r32). pbl(nice_V,r32).
head(buy_P,r33). nbl(neg_buy_P,r33). pbl(nice_P,r33).
head(neg_buy_C,r34). pbl(buy_V,r34). pbl(nice_V,r34).
head(neg_buy_C,r37). pbl(buy_P,r37). pbl(nice_P,r37).
head(neg_buy_V,r35). pbl(buy_P,r35). pbl(nice_P,r35).
head(neg_buy_V,r38). pbl(buy_C,r38). pbl(nice_C,r38).
head(neg_buy_P,r36). pbl(buy_C,r36). pbl(nice_C,r36).
head(neg_buy_P,r39). pbl(buy_V,r39). pbl(nice_V,r39).
head(buy_C,r41). nbl(neg_buy_C,r41). pbl(fast_C,r41).
head(buy_V,r42). nbl(neg_buy_V,r42). pbl(fast_V,r42).
head(buy_P,r43). nbl(neg_buy_P,r43). pbl(fast_P,r43).
head(neg_buy_C,r44). pbl(buy_V,r44). pbl(fast_V,r44).
head(neg_buy_C,r47). pbl(buy_P,r47). pbl(fast_P,r47).
head(neg_buy_P,r46). pbl(buy_C,r46). pbl(fast_C,r46).
head(neg_buy_P,r49). pbl(buy_V,r49). pbl(fast_V,r49).
head(neg_buy_V,r45). pbl(buy_P,r45). pbl(fast_P,r45).
head(neg_buy_V,r48). pbl(buy_C,r48). pbl(fast_C,r48).
opp(buy_C,neg_buy_C).
opp(buy_V,neg_buy_V).
opp(buy_P,neg_buy_P).
% define preferences
level(0,top).
level(1,r11).
level(1,r12).
level(1,r13).
level(2,r21).
level(2,r22).
level(2,r23).
level(2,r24).
level(2,r25).
level(2,r26).
level(2,r27).
level(2,r28).
level(2,r29).
level(3,r31).
level(3,r32).
level(3,r33).
level(3,r34).
level(3,r35).
level(3,r36).
level(3,r37).
level(3,r38).
level(3,r39).
level(4,r41).
level(4,r42).
level(4,r43).
level(4,r44).
level(4,r45).
level(4,r46).
level(4,r47).
level(4,r48).
level(4,r49).
kl(0,1). kl(1,2). kl(2,3). kl(3,4).
pr(X,Y) :- kl(L1,L2), level(L1,X),level(L2,Y).
"""
| [
"davide@davide-All-Series"
] | davide@davide-All-Series |
b28f52ffc894b04f3a0898a1e8524c6aa8dcebb3 | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/output/ch5_code/src/Stepik.5.8.CodeChallenge.LongestPathInArbitraryDAG.py | 6148405bd0771f1e1961245ad63c9942a73cc473 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | import re
from graph.Graph import Graph
# DON'T USE THIS -- THERE ARE CLEAN IMPLEMENTATIONS OF THE DIFFERENT ALGORITHMS IN THE SUBDIRECTORIES.
# DON'T USE THIS -- THERE ARE CLEAN IMPLEMENTATIONS OF THE DIFFERENT ALGORITHMS IN THE SUBDIRECTORIES.
# DON'T USE THIS -- THERE ARE CLEAN IMPLEMENTATIONS OF THE DIFFERENT ALGORITHMS IN THE SUBDIRECTORIES.
# Input format: line 1 = start node id, line 2 = end node id, remaining lines
# are edges written "a->b:weight".
with open('/home/user/Downloads/dataset_240303_7.txt', mode='r', encoding='utf-8') as f:
    data = f.read()
lines = data.strip().split('\n')
start_node = int(lines[0].strip())
end_node = int(lines[1].strip())
g = Graph()
next_e_id = 0  # monotonically increasing suffix for generated edge ids ("E0", "E1", ...)
for l in lines[2:]:
    in_node, out_node, edge_weight = [int(v) for v in re.split('->|:', l)]
    if not g.has_node(in_node):
        g.insert_node(in_node)
    if not g.has_node(out_node):
        g.insert_node(out_node)
    g.insert_edge(f'E{next_e_id}', in_node, out_node, edge_weight)
    next_e_id += 1
# Populate node weights and backtracking info. Each node's data is a tuple where [0] is the calculated weight and [1] is
# the edge the incoming connection that was chosen to calculate that weight (used for backtracking).
#
# start_node should be a root node. Initialize its weight to 0, but initialize all other root node weights to None.
# A None weight is used as a marker to skip over these because we don't want to consider them.
check_nodes = set()
ready_nodes = set()
for node in g.get_nodes(): # Add all roots with None weight and None backtracking edge.
    if g.get_in_degree(node) == 0:
        initial_weight = None
        g.update_node_data(node, (initial_weight, None))
        check_nodes |= {g.get_edge_to(e) for e in g.get_outputs(node)}
        ready_nodes |= {node}
g.update_node_data(start_node, (0, None)) # Overwrite start_node root with 0 weight and None backtracking edge
# Run the algorithm, populating node weights and backtracking edges
while len(check_nodes) > 0:
    # Pick any frontier node whose predecessors have all been finalized.
    for node in check_nodes:
        incoming_nodes = {g.get_edge_from(e) for e in g.get_inputs(node)}
        if incoming_nodes.issubset(ready_nodes):
            incoming_accum_weights = {}
            for edge in g.get_inputs(node):
                source_node = g.get_edge_from(edge)
                source_node_weight, _ = g.get_node_data(source_node)
                edge_weight = g.get_edge_data(edge)
                # Roots that aren't start_node were initialized to a weight of None -- if you see them, skip them.
                if source_node_weight is not None:
                    incoming_accum_weights[edge] = source_node_weight + edge_weight
            if len(incoming_accum_weights) == 0:
                # Not reachable from start_node: propagate the None marker.
                max_edge = None
                max_weight = None
            else:
                max_edge = max(incoming_accum_weights, key=lambda e: incoming_accum_weights[e])
                max_weight = incoming_accum_weights[max_edge]
            g.update_node_data(node, (max_weight, max_edge))
            check_nodes.remove(node)
            check_nodes |= {g.get_edge_to(e) for e in g.get_outputs(node)}
            ready_nodes |= {node}
            break  # restart the scan: check_nodes was mutated while iterating
# Now backtrack from the end_node to start_node to get the path.
longest_path_length, _ = g.get_node_data(end_node)
longest_path = [end_node]
_, backtracking_edge = g.get_node_data(end_node)
while backtracking_edge is not None:
    prev_node = g.get_edge_from(backtracking_edge)
    longest_path.insert(0, prev_node)
    _, backtracking_edge = g.get_node_data(prev_node)
print(f'{longest_path_length}')
print(f'{"->".join([str(n) for n in longest_path])}')
| [
"offbynull@gmail.com"
] | offbynull@gmail.com |
4b74c5ebe349418204db0b31dfe8ddd4bc9fb347 | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /顾天媛2018010980/操作系统实验/作业1.py | 62b65b5fea82293af555a8a363ada38f3e1ddb78 | [] | no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 1,053 | py | # !/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Author: Ty.Gu
Purpose: platform
Created: 24/6/2020
"""
# Assignment 1: query operating-system information with Python's built-in `platform` module.
import platform
print(platform.platform()) # OS name and version, e.g. Windows-10-10.0.18362-SP0
print(platform.system()) # OS name, e.g. Windows
print(platform.version()) # OS version number, e.g. 10.0.18362
print(platform.architecture()) # OS bitness, e.g. ('64bit', 'WindowsPE')
print(platform.machine()) # machine type, e.g. AMD64
print(platform.node()) # the computer's network name, e.g. DESKTOP-1OBE4SD
print(platform.processor()) # processor info, e.g. Intel64 Family 6 Model 142 Stepping 10, GenuineIntel
print(platform.uname()) # aggregate of all of the above, e.g. uname_result(system='Windows', node='DESKTOP-1OBE4SD', release='10', version='10.0.18362', machine='AMD64', processor='Intel64 Family 6 Model 142 Stepping 10, GenuineIntel')
| [
"noreply@github.com"
] | wanghan79.noreply@github.com |
006359c0751853ad080769a798f31b41f16548e4 | 248b1c62e3f06e82c6fc5ef557cc1af506763d8c | /cogs/give.py | 9aa565d35f362044d249dda6dbf9d27208029fe1 | [
"MIT"
] | permissive | virtualCrypto-discord/VCrypto-Utilities | bd091ba5fbccf7d59e137ee5ba5b2077bd01b6de | f735159cc45f8601f5d9a50f2c61ca6ec09d87ed | refs/heads/master | 2023-03-12T19:23:30.833926 | 2021-03-03T07:06:50 | 2021-03-03T07:06:50 | 344,033,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | from discord.ext import commands
from bot import MyBot
from virtualcrypto import AsyncVirtualCryptoClient
import discord
class Give(commands.Cog):
    # Admin-only cog: bulk-distributes this guild's VirtualCrypto currency to
    # every member mentioned in the command message.
    def __init__(self, bot: MyBot):
        self.bot = bot
        # Shared VirtualCrypto API client owned by the bot instance.
        self.vcrypto: AsyncVirtualCryptoClient = self.bot.vcrypto
    @commands.command()
    @commands.has_permissions(administrator=True)
    async def give(self, ctx: commands.Context, amount: int, *, users):
        # Help text below (shown by discord.py) is Japanese for:
        # "Distributes the currency to everyone you mention."
        """メンションした相手全てに通貨を配布します。"""
        # Look up this guild's currency; bail out if none has been created.
        # ("No currency has been created on this server.")
        currency = await self.vcrypto.get_currency_by_guild(ctx.guild.id)
        if currency is None:
            await ctx.send("このサーバーでは通貨は作成されていません。")
            return
        # NOTE: the raw `users` text argument is ignored; the actual
        # recipients are taken from the message's mentions.
        users = ctx.message.mentions
        # Cap at 15 recipients. ("You can distribute to up to 15 people.")
        if len(users) > 15:
            await ctx.send("15人までに配布できます。")
            return
        # Ensure the balance covers amount * recipients; the error message
        # reports the shortfall. NOTE(review): `amount` is not validated here,
        # so non-positive values pass through -- confirm upstream checks.
        balance = await self.bot.get_balance(ctx.guild.id)
        if balance.amount < amount * len(users):
            await ctx.send(f"通貨が{amount * len(users) - balance.amount}{currency.unit}足りません。")
            return
        # "Distributing..." -- then one transaction per recipient.
        await ctx.send("配布しています...")
        for user in users:
            await self.vcrypto.create_user_transaction(
                unit=currency.unit,
                receiver_discord_id=user.id,
                amount=amount
            )
        await self.bot.refresh_cache()
        # "Distribution complete."
        await ctx.send("配布完了しました。")
def setup(bot):
    # discord.py extension entry point: attach the Give cog to the given bot.
    cog = Give(bot)
    return bot.add_cog(cog)
| [
"sumito@izumita.com"
] | sumito@izumita.com |
e7c80e9ae3396015fadb7df42e1afe4f03dcf766 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/third_party/ruamel/yaml/__init__.py | 9be208205ea694383b43ab60c013defe2f5e4b6e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 2,997 | py | # coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
# install_requires of ruamel.base is not really required but the old
# ruamel.base installed __init__.py, and thus a new version should
# be installed at some point
# Package metadata (name, version, build inputs, classifiers); presumably
# consumed by ruamel's shared setup tooling -- see the note above about
# ruamel.base's install_requires.
_package_data = dict(
    full_package_name="ruamel.yaml",
    version_info=(0, 11, 11),
    author="Anthon van der Neut",
    author_email="a.van.der.neut@ruamel.eu",
    description="ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order", # NOQA
    entry_points=None,
    # ruamel.ordereddict is only required on Python 2 interpreters.
    install_requires=dict(
        any=[],
        py26=["ruamel.ordereddict"],
        py27=["ruamel.ordereddict"]
    ),
    # C acceleration extension built from the bundled libyaml sources.
    ext_modules=[dict(
        name="_ruamel_yaml",
        src=["ext/_ruamel_yaml.c", "ext/api.c", "ext/writer.c", "ext/dumper.c",
             "ext/loader.c", "ext/reader.c", "ext/scanner.c", "ext/parser.c",
             "ext/emitter.c"],
        lib=[],
        # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n' # NOQA
    )
    ],
    classifiers=[
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python :: Implementation :: Jython",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup"
    ],
    windows_wheels=True,
    read_the_docs='yaml',
)
# < from ruamel.util.new import _convert_version
def _convert_version(tup):
"""create a PEP 386 pseudo-format conformant string from tuple tup"""
ret_val = str(tup[0]) # first is always digit
next_sep = "." # separator for next extension, can be "" or "."
for x in tup[1:]:
if isinstance(x, int):
ret_val += next_sep + str(x)
next_sep = '.'
continue
first_letter = x[0].lower()
next_sep = ''
if first_letter in 'abcr':
ret_val += 'rc' if first_letter == 'r' else first_letter
elif first_letter in 'pd':
ret_val += '.post' if first_letter == 'p' else '.dev'
return ret_val
# <
version_info = _package_data['version_info']
__version__ = _convert_version(version_info)
del _convert_version  # only needed above; keep the module namespace clean
try:
    # Prefer the libyaml-backed C implementation when the extension imports.
    from .cyaml import * # NOQA
    __with_libyaml__ = True
except (ImportError, ValueError): # for Jython
    __with_libyaml__ = False
# body extracted to main.py
try:
    from .main import * # NOQA
except ImportError:
    # Fallback to the absolute import when relative imports are unavailable.
    from ruamel.yaml.main import * # NOQA
| [
"toork@uw.edu"
] | toork@uw.edu |
61deb11fc76a069f92fdd3010e5cabe67388e589 | ea6c97980ca32a61c325d0934e463399bed53b6a | /app/migrations/0005_auto_20210727_2343.py | 2b84a58e65de5d8bf67aacb653ae4ecc52e7bbea | [] | no_license | WambiruL/chat-app | 4201ffe97e3c565669e2212ab337ac28e27bce2b | 7e445afd61d5b01599fedeea60fbea33a3459d07 | refs/heads/master | 2023-06-26T06:57:30.467106 | 2021-07-29T08:27:49 | 2021-07-29T08:27:49 | 389,715,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-27 20:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Reshape the chat schema: drop the Room model and turn Message into a
    direct message with explicit sender/receiver links and a `seen` flag."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0004_auto_20210727_2300'),
    ]

    operations = [
        # Rooms are removed entirely; messages no longer belong to a room.
        migrations.DeleteModel(
            name='Room',
        ),
        # Order messages chronologically by default.
        migrations.AlterModelOptions(
            name='message',
            options={'ordering': ['date_created']},
        ),
        migrations.RenameField(
            model_name='message',
            old_name='date',
            new_name='date_created',
        ),
        migrations.RemoveField(
            model_name='message',
            name='room',
        ),
        migrations.RemoveField(
            model_name='message',
            name='user',
        ),
        # A message now links its receiver and sender users directly.
        migrations.AddField(
            model_name='message',
            name='receiver',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='received_messages', to=settings.AUTH_USER_MODEL),
        ),
        # Read-receipt flag; messages start out unseen.
        migrations.AddField(
            model_name='message',
            name='seen',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='message',
            name='sender',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_messages', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='message',
            name='message',
            field=models.TextField(),
        ),
    ]
| [
"wambirulorraine@gmail.com"
] | wambirulorraine@gmail.com |
3f7ef408f371de3afba3738f038af48d2f611471 | 994216797f013a4a8926a5689aabd1653bf93a18 | /pkg/fibonacci.py | 2bcc9c0bb1f7b9295999beea7cc96da1577b469d | [] | no_license | Jamie-Cheon/Python | 2387a0e3abef7e7ed594e994d812faa1b322ce19 | 8f0351e58baae762b2cb2f5b2ce12d99358459e1 | refs/heads/master | 2022-06-15T13:47:42.368609 | 2020-05-08T09:27:21 | 2020-05-08T09:27:21 | 262,274,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | class Fibonacci:
def __init__(self, title="fibonacci"):
    # Use the provided title if one is given; otherwise fall back to the
    # default "fibonacci".
    self.title = title
def fib(n):
a, b = 0, 1
while a < n:
print(a, end=' ')
a, b = b, a+b
print()
def fib2(n):
result = []
a, b = 0, 1
while a < n:
result.append(a)
a, b = b, a+b
return result
| [
"jamiecheon55@gmail.com"
] | jamiecheon55@gmail.com |
de522ff95f318f182e74c4886331fdfdbb87dc3e | cf54adda6874a4256401e9e4eb28f353b28ae74b | /python-modules/MySQLdb-dict-cursor.py | 41d807e759d6e536a2a6e039d2ab55a92e114269 | [] | no_license | oraant/study | c0ea4f1a7a8c3558c0eac4b4108bc681a54e8ebf | 7bce20f2ea191d904b4e932c8d0abe1b70a54f7e | refs/heads/master | 2020-09-23T02:08:07.279705 | 2016-11-21T06:30:26 | 2016-11-21T06:30:26 | 66,995,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # coding:utf-8
import MySQLdb
from MySQLdb.cursors import DictCursor
# Connect to the local `test` database and fetch rows as dictionaries
# (DictCursor maps column names to values instead of returning tuples).
conn = MySQLdb.connect('localhost','django','django','test',3306)
cursor = conn.cursor(cursorclass=DictCursor)
cursor.execute('select ID, name from ttt')
print cursor.fetchall()
# Conclusions:
# - a NULL in the database becomes None in Python
# - column-name case is preserved: an upper-case column yields an upper-case dict key
| [
"oraant777@gmail.com"
] | oraant777@gmail.com |
e07f99f6027d5c036e5441b11219946a60d927ef | 0514c992dc9dd2c54bc757c8ca4487ca3a8434c5 | /miscell/demo.py | 2eacb3cdde0514b7505c11818392e249cd913b8f | [] | no_license | amalmhn/PythonDjangoProjects | 0878fffad4350d135197ceb0612dd0765b075bb7 | 3a065bb93135075c78a7bff965f83e605d8de4bc | refs/heads/master | 2023-04-25T05:50:16.768383 | 2021-05-03T06:50:06 | 2021-05-03T06:50:06 | 315,811,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | size=int(input("Enter the size of the stack"))
# Fixed-capacity stack demo driven by console input. `size` is read on the
# (metadata-fused) first line of this file; `stk` and `top` are shared
# module-level state mutated by the helpers below.
stk=[]
top=0
n=1.  # menu-loop sentinel; any non-zero value keeps the menu running
def push(element):
    """Push `element` onto the stack unless it is already full."""
    global top
    if top>=size:
        print('stack is full')
    else:
        stk.insert(top,element)
        print('Element pushed')
        top+=1
        print('Top is now',top)
def pop():
    """Report and discard the top element unless the stack is empty."""
    global top
    if top<=0:
        print('Stack is empty')
    else:
        print(stk[top-1],'popped')
        top-=1  # the slot is reused by the next push rather than deleted
def display():
    """Print the live elements from bottom of stack to top."""
    global top
    for i in range(0,top):
        print(stk[i])
# Menu loop: 1 = push, 2 = pop, 3 = display. After each round the
# (metadata-fused) final line re-reads `n`; entering 0 exits.
while n!=0:
    option=int(input('Enter operation you want to perform 1)Push 2)Pop 3)Display'))
    if option==1:
        element=int(input('Enter the element'))
        push(element)
    elif option==2:
        pop()
    elif option==3:
        display()
    else:
        print('Invalid option')
n=int(input('Press "1" for continue, "0" for exit')) | [
"amalmhnofficial@gmail.com"
] | amalmhnofficial@gmail.com |
4423bfc12a256ce5fcc27ad43ef15cdf7d0537a8 | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/fluid/tests/unittests/test_bilinear_api.py | 24eae4797de85f371ed62e78c85b160f698ee9eb | [
"Apache-2.0"
] | permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 2,376 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
class TestBilinearAPI(unittest.TestCase):
    """Static-graph coverage for paddle.nn.Bilinear."""
    def test_api(self):
        with fluid.program_guard(fluid.default_startup_program(),
                                 fluid.default_main_program()):
            # Run on GPU when this build has CUDA support, otherwise CPU.
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
            else:
                place = core.CPUPlace()
            exe = fluid.Executor(place)
            # Graph inputs: a (5, 5) and a (5, 4) float32 tensor.
            data1 = fluid.data(name='X1', shape=[5, 5], dtype='float32')
            data2 = fluid.data(name='X2', shape=[5, 4], dtype='float32')
            layer1 = np.random.random((5, 5)).astype('float32')
            layer2 = np.random.random((5, 4)).astype('float32')
            bilinear = paddle.nn.Bilinear(
                in1_features=5, in2_features=4, out_features=1000)
            ret = bilinear(data1, data2)
            # Initialize parameters, then run the graph with random inputs.
            exe.run(fluid.default_startup_program())
            ret_fetch = exe.run(feed={'X1': layer1,
                                      'X2': layer2},
                                fetch_list=[ret.name])
            # Batch of 5 samples in, 1000 bilinear features out.
            self.assertEqual(ret_fetch[0].shape, (5, 1000))
class TestBilinearAPIDygraph(unittest.TestCase):
    """Dynamic-graph (eager mode) coverage for paddle.nn.Bilinear."""
    def test_api(self):
        paddle.disable_static()
        layer1 = np.random.random((5, 5)).astype('float32')
        layer2 = np.random.random((5, 4)).astype('float32')
        bilinear = paddle.nn.Bilinear(
            in1_features=5, in2_features=4, out_features=1000)
        ret = bilinear(paddle.to_tensor(layer1), paddle.to_tensor(layer2))
        # Batch of 5 samples in, 1000 bilinear features out.
        self.assertEqual(ret.shape, [5, 1000])
# Allow running this test module directly (python test_bilinear_api.py).
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | hutuxian.noreply@github.com |
e383d2fc173e8a7434d12f2956d71aa7370dc271 | f3aa6bf16293beb94c7f63df28e8dfd27c8b603f | /codes/contest/leetcode/3sum.py | 3347c3dd94b823257a8e51db0eb1d02325f52e1f | [] | no_license | Farenew/dirtysalt.github.io | b8a9ddd7787fd0659b478584682ec97e8e9be0b3 | 856e84adf22f6c82e55f5a7f843fbccfdf17109f | refs/heads/master | 2020-06-17T02:42:25.392147 | 2019-07-08T01:59:46 | 2019-07-08T01:59:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | #!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
# class Solution(object):
# def threeSum(self, nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# nums.sort()
# n = len(nums)
# ans = []
# dedup = set()
#
# for i in range(n):
# target = 0 - nums[i]
# j, k = i + 1, n - 1
# while j < k:
# value = nums[j] + nums[k]
# if value == target:
# a, b, c = nums[i], nums[j], nums[k]
# value = (a, b, c)
# if value not in dedup:
# ans.append(value)
# dedup.add(value)
# j += 1
# elif value > target:
# k -= 1
# else:
# j += 1
# return ans
class Solution(object):
    """Classic 3Sum solved with a sort plus a two-pointer sweep."""

    def threeSum(self, nums):
        """Return every unique triplet of values from ``nums`` that sums to
        zero, as (a, b, c) tuples in ascending order of the anchor value.

        Note: sorts ``nums`` in place as a side effect.
        """
        nums.sort()
        size = len(nums)
        triplets = []
        for first in range(size):
            # Identical anchor values would only rediscover the same triplets.
            if first > 0 and nums[first] == nums[first - 1]:
                continue
            want = 0 - nums[first]
            lo, hi = first + 1, size - 1
            while lo < hi:
                total = nums[lo] + nums[hi]
                if total < want:
                    # Too small: advance lo past any duplicate values.
                    lo += 1
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                elif total > want:
                    # Too big: retreat hi past any duplicate values.
                    hi -= 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
                else:
                    triplets.append((nums[first], nums[lo], nums[hi]))
                    lo += 1
                    hi -= 1
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return triplets
# Ad-hoc smoke checks when run as a script (results are printed, not asserted).
if __name__ == '__main__':
    s = Solution()
    print(s.threeSum([-1, 0, 1, 2, -1, -4]))
    print(s.threeSum([-4, -2, -2, -2, 0, 1, 2, 2, 2, 3, 3, 4, 4, 6, 6]))
    print(s.threeSum([0] * 512))
| [
"dirtysalt1987@gmail.com"
] | dirtysalt1987@gmail.com |
7d38c9579b7e1f455d55e64c1c3aae3797d0cbdf | 6b791247919f7de90c8402abcca64b32edd7a29b | /lib/coginvasion/hood/DDSafeZoneLoader.py | 99815be7db2a7e56f45b1f5ada014a7d5b35295d | [
"Apache-2.0"
] | permissive | theclashingfritz/Cog-Invasion-Online-Dump | a9bce15c9f37b6776cecd80b309f3c9ec5b1ec36 | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | refs/heads/master | 2021-01-04T06:44:04.295001 | 2020-02-14T05:23:01 | 2020-02-14T05:23:01 | 240,434,213 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.hood.DDSafeZoneLoader
from lib.coginvasion.holiday.HolidayManager import HolidayType
import SafeZoneLoader, DDPlayground
class DDSafeZoneLoader(SafeZoneLoader.SafeZoneLoader):
    """Safe-zone loader for Donald's Dock: asset paths plus fog handling."""

    def __init__(self, hood, parentFSM, doneEvent):
        SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
        self.playground = DDPlayground.DDPlayground
        # Background music for the playground and its interiors/battles.
        self.pgMusicFilename = 'phase_6/audio/bgm/DD_nbrhood.mid'
        self.interiorMusicFilename = 'phase_6/audio/bgm/DD_SZ_activity.mid'
        self.battleMusicFile = 'phase_3.5/audio/bgm/encntr_general_bg.mid'
        self.invasionMusicFiles = [
            'phase_12/audio/bgm/BossBot_CEO_v1.mid',
            'phase_9/audio/bgm/encntr_suit_winning.mid',
        ]
        self.tournamentMusicFiles = [
            'phase_3.5/audio/bgm/encntr_nfsmw_bg_1.ogg',
            'phase_3.5/audio/bgm/encntr_nfsmw_bg_2.ogg',
            'phase_3.5/audio/bgm/encntr_nfsmw_bg_3.ogg',
            'phase_3.5/audio/bgm/encntr_nfsmw_bg_4.ogg',
        ]
        self.bossBattleMusicFile = 'phase_7/audio/bgm/encntr_suit_winning_indoor.mid'
        # DNA (scene description) files; the storage file gets a winter
        # variant while the Christmas holiday is active.
        self.dnaFile = 'phase_6/dna/donalds_dock_sz.pdna'
        self.szStorageDNAFile = 'phase_6/dna/storage_DD_sz.pdna'
        is_christmas = base.cr.holidayManager.getHoliday() == HolidayType.CHRISTMAS
        self.szHolidayDNAFile = 'phase_6/dna/winter_storage_DD_sz.pdna' if is_christmas else None
        self.telescope = None
        self.birdNoise = 'phase_6/audio/sfx/SZ_DD_Seagull.ogg'

    def load(self):
        SafeZoneLoader.SafeZoneLoader.load(self)
        # The HQ model ships with door-frame hole geometry that should not be
        # visible in this playground; stash all four pieces.
        hq = self.geom.find('**/*toon_landmark_hqDD*')
        for hole in ('**/doorFrameHoleLeft_0', '**/doorFrameHoleRight_0',
                     '**/doorFrameHoleLeft_1', '**/doorFrameHoleRight_1'):
            hq.find(hole).stash()

    def enter(self, requestStatus):
        SafeZoneLoader.SafeZoneLoader.enter(self, requestStatus)
        self.hood.setWhiteFog()

    def exit(self):
        self.hood.setNoFog()
SafeZoneLoader.SafeZoneLoader.exit(self) | [
"theclashingfritz@users.noreply.github.com"
] | theclashingfritz@users.noreply.github.com |
e1db71bb2fd5a9128fd41ae825d57ec3fa777beb | ec635c82b4516c19c0f86489420222e3ee68ef72 | /nnlib/networks/resnet_cifar.py | 8c3b8bf14981b8c30c5cafec58f702f720d3b5ec | [
"MIT"
] | permissive | keyboardAnt/nnlib | 1e29443db7f71c5b87a36c923a32e9b15225f8cb | 8062403d6d5bd57af7047c68a295d19be980f8e7 | refs/heads/master | 2022-12-12T07:40:44.873705 | 2020-09-15T08:13:35 | 2020-09-15T08:13:35 | 295,259,611 | 1 | 0 | null | 2020-09-13T23:57:20 | 2020-09-13T23:57:19 | null | UTF-8 | Python | false | false | 4,323 | py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Standard two-layer 3x3 residual block (ResNet-18/34 style).

    The skip connection is the identity when shape is preserved, otherwise a
    1x1 strided projection followed by normalization.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, norm_layer=nn.BatchNorm2d):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = norm_layer(planes)
        # Project the input when the residual and main paths disagree in
        # spatial size or channel count.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                norm_layer(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        """conv-bn-relu, conv-bn, add shortcut, final relu."""
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 reduce, 3x3, 1x1 expand) as used by
    ResNet-50/101/152; output channels are ``expansion * planes``.

    Bug fix: the projection shortcut previously called ``nn.norm_layer(...)``,
    which raises ``AttributeError`` whenever a projection is needed (stride
    != 1 or channel mismatch). It now calls the ``norm_layer`` argument.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, norm_layer=nn.BatchNorm2d):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = norm_layer(self.expansion * planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                norm_layer(self.expansion * planes)  # was: nn.norm_layer(...)
            )

    def forward(self, x):
        """Three conv-bn stages with ReLU between, plus residual add."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four stages of residual
    blocks, 4x4 average pool, then a linear classifier.

    ``block`` must expose an integer ``expansion`` attribute and accept
    ``(in_planes, planes, stride, norm_layer=...)``.
    """

    def __init__(self, block, num_blocks, num_classes=10, norm_layer=nn.BatchNorm2d):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(64)
        # Stage widths double while the first block of stages 2-4 downsamples.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, norm_layer=norm_layer)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, norm_layer=norm_layer)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride, norm_layer):
        """Stack ``num_blocks`` blocks; only the first one uses ``stride``."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s, norm_layer=norm_layer))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return raw class logits of shape (batch, num_classes)."""
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
# Factory helpers matching the standard ResNet depths (He et al., 2016):
# 18/34 use BasicBlock, 50/101/152 use the Bottleneck block.
def resnet18(num_classes=10, norm_layer=nn.BatchNorm2d):
    # [2, 2, 2, 2] BasicBlocks per stage.
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, norm_layer=norm_layer)

def resnet34(num_classes=10, norm_layer=nn.BatchNorm2d):
    # [3, 4, 6, 3] BasicBlocks per stage.
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, norm_layer=norm_layer)

def resnet50(num_classes=10, norm_layer=nn.BatchNorm2d):
    # Same stage depths as resnet34 but with 4x-expansion Bottlenecks.
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, norm_layer=norm_layer)

def resnet101(num_classes=10, norm_layer=nn.BatchNorm2d):
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, norm_layer=norm_layer)

def resnet152(num_classes=10, norm_layer=nn.BatchNorm2d):
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, norm_layer=norm_layer)
| [
"harhro@gmail.com"
] | harhro@gmail.com |
78c6b04fab58b471b567bedf62f83b9b7b4a6599 | 1af426c1eb2fc76624854e604cef3d568303f670 | /paper/fig_full_map.py | 7e3940eb04a59761b50997b0a03f5034c1c6c76d | [
"BSD-3-Clause"
] | permissive | ACTCollaboration/tilec | 51f0bbb8f83013fd4c3cdc95e33f8ba88b6a3e56 | 29cbd055c360c67b8676e1cf8534049c0f1dd16e | refs/heads/master | 2022-02-17T19:26:18.113206 | 2022-02-08T04:38:51 | 2022-02-08T04:38:51 | 152,158,730 | 1 | 1 | NOASSERTION | 2020-05-06T08:09:34 | 2018-10-08T23:17:23 | Jupyter Notebook | UTF-8 | Python | false | false | 4,267 | py | from __future__ import print_function
from orphics import maps,io,cosmology,catalogs
from pixell import enmap,reproject
import numpy as np
import os,sys,shutil
from soapack import interfaces as sints
import healpy as hp
version = "map_v1.2.0_joint"
cversion = "v1.2.0"
down = 6
# nilc = hp.read_alm("/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_CompMap_Compton-SZMap-nilc-ymaps_2048_R2.00_alm.fits")
# annot = 'paper/all_planck_act.csv'
#annot = 'paper/all_planck_clusters.csv'
#annot = 'paper/public_clusters.csv'
#annot = None
t = {'deep56': 2, 'boss':4}
sels = {'deep56':np.s_[...,220:-220,300:-300] , 'boss':np.s_[...,450:-450,500:-500]}
for region in ['boss','deep56']:
yname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_comptony_%s.fits" % (version,region,region,version)
ybname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_comptony_%s_beam.txt" % (version,region,region,version)
cname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_deprojects_comptony_%s.fits" % (version,region,region,version)
cbname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_deprojects_comptony_%s_beam.txt" % (version,region,region,version)
sname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_%s.fits" % (version,region,region,version)
sbname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_%s_beam.txt" % (version,region,region,version)
mname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_mask.fits" % (version,region)
# shutil.copy(yname,"/scratch/r/rbond/msyriac/data/for_sigurd/")
# shutil.copy(sname,"/scratch/r/rbond/msyriac/data/for_sigurd/")
# shutil.copy(mname,"/scratch/r/rbond/msyriac/data/for_sigurd/")
# continue
mask = maps.binary_mask(enmap.read_map(mname))
# Planck
cols = catalogs.load_fits("/scratch/r/rbond/msyriac/data/planck/data/J_A+A_594_A27.fits",['RAdeg','DEdeg'])
ras = cols['RAdeg']
decs = cols['DEdeg']
# ACT
cols = catalogs.load_fits("paper/E-D56Clusters.fits",['RAdeg','DECdeg'])
ras = np.append(ras,cols['RAdeg'])
decs = np.append(decs,cols['DECdeg'])
if region=='boss':
radius = 10
width = 2
fontsize = 28
elif region=='deep56':
radius = 6
width = 1
fontsize = 16
#annot = 'paper/temp_all_clusters.csv'
annot = None
# catalogs.convert_catalog_to_enplot_annotate_file(annot,ras,
# decs,radius=radius,width=width,
# color='red',mask=mask,threshold=0.99)
# dm = sints.PlanckHybrid(region=mask)
# pmap = dm.get_splits(season=None,patch=None,arrays=['545'],ncomp=1,srcfree=False)[0,0,0]
ymap = enmap.read_map(yname)*mask
smap = enmap.read_map(sname)*mask
# nmap = reproject.enmap_from_healpix(nilc, mask.shape, mask.wcs, ncomp=1, unit=1, lmax=0,
# rot="gal,equ", first=0, is_alm=True, return_alm=False, f_ell=None)
# io.hplot(nmap[sels[region]],'fig_full_nmap_%s' % region,color='gray',grid=True,colorbar=True,
# annotate=annot,min=-1.25e-5,max=3.0e-5,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14)
# io.hplot(pmap[sels[region]],'fig_full_pmap_%s' % region,color='planck',grid=True,colorbar=True,
# ticks=t[region],downgrade=down)
# io.hplot(ymap[sels[region]],'fig_full_ymap_%s' % region,color='gray',grid=True,colorbar=True,
# annotate=annot,min=-1.25e-5,max=3.0e-5,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14,font_size=fontsize)
io.hplot(ymap[sels[region]],'fig_full_ymap_%s' % region,color='gray',grid=True,colorbar=True,
annotate=annot,min=-0.7e-5,max=2.0e-5,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14,font_size=fontsize)
io.hplot(smap[sels[region]],'fig_full_smap_%s' % region,color='planck',grid=True,colorbar=True,
range=300,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14,font_size=fontsize)
| [
"mathewsyriac@gmail.com"
] | mathewsyriac@gmail.com |
261cfed4856dceb90491461d4404f4480a20f972 | 13cccbc1bbaec02f53d2f4e654d480512f6c2bb5 | /ds/segment-tree/stone.py | 08d8c7025ff275abc465f50eeeb164d642083106 | [] | no_license | sjdeak/interview-practice | 580cc61ec0d20d548bbc1e9ebebb4a64cd7ac2dc | 1746aaf5ab06603942f9c85c360e319c110d4df8 | refs/heads/master | 2020-07-20T21:06:23.864208 | 2019-09-08T10:54:16 | 2019-09-08T10:54:16 | 206,709,284 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,535 | py | # 黑白石头 https://www.jisuanke.com/course/804/41868
import unittest
class SegmentTreeIntervalSet:
    """
    Segment tree over a 0/1 array for the "black and white stones" problem:

      * update(x, v)          -- assign bit v to position x
      * intervalToggle(x, y)  -- lazily flip every bit in [x, y]
      * query(st, ed)         -- longest run of consecutive 1s inside [st, ed]

    Bug fix: ``intervalToggle`` previously referenced an undefined name
    ``val`` (NameError) and the tree stored plain interval sums, which did
    not match the documented "longest consecutive ones" semantics that the
    accompanying unittest expects. Each node now keeps (best, prefix,
    suffix) run lengths for both ones and zeros so a lazy flip is a simple
    swap of the two triples. Debug prints were removed.
    """

    def __init__(self, A):
        self.length = len(A)
        size = 4 * self.length
        # Per node: (best run, prefix run, suffix run) of ones / of zeros.
        self.ones = [(0, 0, 0)] * size
        self.zeros = [(0, 0, 0)] * size
        # lazy_flip[p]: the children of p still need to be flipped.
        self.lazy_flip = [False] * size

        def _build(p, l, r):
            if l == r:
                self._set_leaf(p, A[l])
                return
            mid = (l + r) // 2
            _build(2 * p + 1, l, mid)
            _build(2 * p + 2, mid + 1, r)
            self._pull(p, l, r)

        if self.length:
            _build(0, 0, self.length - 1)

    def _set_leaf(self, p, v):
        """Initialize node p as a single cell holding bit v."""
        self.ones[p] = (1, 1, 1) if v else (0, 0, 0)
        self.zeros[p] = (0, 0, 0) if v else (1, 1, 1)

    @staticmethod
    def _merge(a, b, la, lb):
        """Combine run triples of adjacent intervals of lengths la and lb."""
        best_a, pre_a, suf_a = a
        best_b, pre_b, suf_b = b
        # A prefix that covers the whole left child extends into the right.
        pre = pre_a if pre_a < la else la + pre_b
        suf = suf_b if suf_b < lb else lb + suf_a
        best = max(best_a, best_b, suf_a + pre_b)
        return (best, pre, suf)

    def _pull(self, p, l, r):
        """Recompute node p from its two children."""
        mid = (l + r) // 2
        la, lb = mid - l + 1, r - mid
        lc, rc = 2 * p + 1, 2 * p + 2
        self.ones[p] = self._merge(self.ones[lc], self.ones[rc], la, lb)
        self.zeros[p] = self._merge(self.zeros[lc], self.zeros[rc], la, lb)

    def _apply_flip(self, p):
        """Flip node p in O(1): swap the ones/zeros triples, mark lazily."""
        self.ones[p], self.zeros[p] = self.zeros[p], self.ones[p]
        self.lazy_flip[p] = not self.lazy_flip[p]

    def _down(self, p):
        """Push a pending flip from p to its children."""
        if self.lazy_flip[p]:
            self._apply_flip(2 * p + 1)
            self._apply_flip(2 * p + 2)
            self.lazy_flip[p] = False

    def query(self, st, ed):
        """
        Return the length of the longest run of 1s within [st, ed].
        """
        def _query(p, l, r):
            # Returns (ones-triple, covered length) for the intersection.
            if st <= l and r <= ed:
                return self.ones[p], r - l + 1
            self._down(p)
            mid = (l + r) // 2
            if ed <= mid:
                return _query(2 * p + 1, l, mid)
            if st > mid:
                return _query(2 * p + 2, mid + 1, r)
            left_t, left_len = _query(2 * p + 1, l, mid)
            right_t, right_len = _query(2 * p + 2, mid + 1, r)
            merged = self._merge(left_t, right_t, left_len, right_len)
            return merged, left_len + right_len
        return _query(0, 0, self.length - 1)[0][0]

    def update(self, x, v):
        """
        Assign bit v (0 or 1) to position x.
        """
        def _update(p, l, r):
            if l == r:
                self._set_leaf(p, v)
                return
            self._down(p)
            mid = (l + r) // 2
            if x <= mid:
                _update(2 * p + 1, l, mid)
            else:
                _update(2 * p + 2, mid + 1, r)
            self._pull(p, l, r)
        _update(0, 0, self.length - 1)

    def intervalToggle(self, x, y):
        """
        Flip every bit (0 <-> 1) in the closed interval [x, y].
        """
        def _toggle(p, l, r):
            if x <= l and r <= y:
                self._apply_flip(p)
                return
            self._down(p)
            mid = (l + r) // 2
            if x <= mid:
                _toggle(2 * p + 1, l, mid)
            if y > mid:
                _toggle(2 * p + 2, mid + 1, r)
            self._pull(p, l, r)
        _toggle(0, 0, self.length - 1)
class Test(unittest.TestCase):
    # NOTE(review): the expected values below encode "longest run of
    # consecutive 1s in the interval" semantics for query() — e.g. [1,0,1,0]
    # yields 1, not the interval sum 2 — matching the class docstring. The
    # shipped intervalToggle crashed on an undefined name `val`, so this
    # test could never pass as-is; confirm after fixing the tree.
    def testSum(self):
        # Test data taken from the jisuanke exercise linked below
        # ("Pudge's Meat Hook", https://www.jisuanke.com/course/804/41866).
        t = SegmentTreeIntervalSet([1, 0, 1, 0])
        self.assertEqual(1, t.query(0, 3))
        t.intervalToggle(1, 2)
        self.assertEqual(2, t.query(0, 3))
        t.intervalToggle(2, 2)
        self.assertEqual(0, t.query(3, 3))
        # self.assertEqual(t.query(0, 9), 25)
        # self.assertEqual(t.query(0, 1), 4)
        # t.update(0, 100)
        # self.assertEqual(t.query(0,1), 102)
if __name__ == "__main__":
    # Run the unittest suite; uncomment the next line to run a single case.
    # import sys;sys.argv = ['', 'Test.testInit']
    unittest.main()
"""
# t = SegmentTreeSum([1] * 10)
# t.intervalAdd(0, 4, 1)
t = SegmentTreeSumIntervalAdd([1] * 10)
# t.intervalAdd(0, 4, 1) # [2,2,2,2,2,1,1,1,1,1]
t.intervalSet(4, 8, 3) # [1,1,1,1,3,3,3,3,3,1]
# self.assertEqual(t.query(0, 9), 25)
print('t.data:', t.data)
print('t.query(0, 9):', t.query(0, 9))
print('t.lazySet:', t.lazySet)
"""
| [
"sjdeak@yahoo.com"
] | sjdeak@yahoo.com |
b9ce71918ff5f859c0e3130615e632b06972dabc | 509e9d64744f720392fda2b978d783f985c60824 | /python2.7/site-packages/numpy/lib/_iotools.py | a5419a22f81b6e42d7cd2b917a50bb9b08c7f2ee | [] | no_license | theideasmith/Instant-OpenCV-FFMPEG | 0560598fba630ded533b4e6c111c61c9b0b7502b | 234e359af245b4832b3e7ade6070e91c81b65de0 | refs/heads/master | 2021-01-11T20:39:33.443984 | 2017-01-16T23:09:46 | 2017-01-16T23:09:46 | 79,162,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1659736f7079dac7416a1915b72802e506a32c2717f3b731db4d61c567506fcd
size 32062
| [
"aclscientist@gmail.com"
] | aclscientist@gmail.com |
0fa8f09c57c7c3c70066e95565ea32012c2724da | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02608/s445397347.py | bb422d2ccfcddc22d586f269320686308b27139a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | n = int(input())
M = int(n**(0.5))
ans = [0]*(n+1)
for x in range(1,M+1):
for y in range(1,10**2):
for z in range(1,10**2):
if x**2+y**2+z**2+x*y+y*z+z*x > n:
break
ans[x**2+y**2+z**2+x*y+y*z+z*x] += 1
if x**2+y**2 > n:
break
for i in range(n):
print(ans[i+1]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4795e440d021ea6c20464178c5e259094ae896c1 | f023692f73992354a0b7823d9c49ae730c95ab52 | /tool/datastructure.py | 153f8ccd287d1ff772731de54504c2d56135f836 | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 8,403 | py | class BinaryIndexedTree:
"""
l = [1, 2, 3, 4, 5, 6, 7, 8] のlistを例とした場合、
以下のような範囲での演算結果(sum)を配列に持つ。
1: [1, 2, 3, 4, 5, 6, 7, 8]
2: [1, 2, 3, 4]
3: [1, 2] [5, 6]
4: [1] [3] [5] [7]
1 ~ r までの結果S(r)を、各層で必要な演算済みのデータを使うことで log(N) で計算できる.
l ~ r までの結果は S(r) - S(l - 1) で同じくlog(N)計算できる.
データ構造の作成は N*log(N).
配列データは1始まりとして計算.
長さ n + 1 (0 ~ n) の配列にデータを持ち, データ内の対象要素を l ~ r とすると, 配列の r 番目が格納先となる.
また対象要素の数は r の LSB(Least Significant Bit) に一致する.
転倒数の計算にも使える.
"""
def __init__(self, n):
"""
:param n: num of date.
"""
self.num = n
self.tree = [0] * (n + 1)
def add(self, k, x):
"""
:param k: [1, self.num]
:param x: add num.
:return: None
"""
while k <= self.num:
self.tree[k] += x
k += k & -k
def sum(self, k):
"""
1 ~ k までの合計
:param k:
:return:
"""
re = 0
while k > 0:
re += self.tree[k]
k -= k & -k
return re
def sum_lr(self, l, r):
"""
sum of form l to r
:param l: 1 <= l <= r
:param r: l <= r <= self.num
:return:
"""
return self.sum(r) - self.sum(l - 1)
class SegTree:
    """Iterative segment tree over an arbitrary associative operation.

    The leaf count is padded up to the next power of two; unused leaves hold
    the identity element, so ``func`` applied to them must be a no-op.
    References:
        https://algo-logic.info/segment-tree/#toc_id_1
        https://qiita.com/takayg1/items/c811bd07c21923d7ec69
    """

    def __init__(self, elm, func, default):
        """
        :param elm: initial sequence of values
        :param func: associative binary operation f(x, y)
        :param default: identity element of func
        """
        self.num = 1 << (len(elm) - 1).bit_length()
        self.func = func
        self.default = default
        self.tree = [default] * (2 * self.num)
        # Leaves live at indices [num, 2*num); internal node i covers the
        # union of its children 2i and 2i+1.
        self.tree[self.num:self.num + len(elm)] = list(elm)
        for node in reversed(range(1, self.num)):
            self.tree[node] = func(self.tree[2 * node], self.tree[2 * node + 1])

    def element(self, k):
        """
        Return the current value stored at position k of the sequence.
        """
        return self.tree[self.num + k]

    def update(self, k, x):
        """
        Set position k to x and refresh all ancestors.
        """
        pos = self.num + k
        self.tree[pos] = x
        pos //= 2
        while pos >= 1:
            self.tree[pos] = self.func(self.tree[2 * pos], self.tree[2 * pos + 1])
            pos //= 2

    def query(self, l, r):
        """
        Return func folded over the half-open range [l, r), 0-indexed.
        """
        acc_left = self.default
        acc_right = self.default
        lo = l + self.num
        hi = r + self.num
        while lo < hi:
            if lo & 1:
                # lo is a right child: take it and step past.
                acc_left = self.func(acc_left, self.tree[lo])
                lo += 1
            if hi & 1:
                # hi is exclusive; absorb the element just before it.
                hi -= 1
                acc_right = self.func(self.tree[hi], acc_right)
            lo //= 2
            hi //= 2
        return self.func(acc_left, acc_right)
class BinaryTrie:
    """
    Ordered multiset of non-negative integers stored bit-by-bit in a trie.

    Supports rank/select-style queries (nearest value at or below / at or
    above x, k-th smallest) via the per-node counts kept in BinaryNode.
    Original author's note (translated from Japanese): "slow — needs
    improvement"; the recursive implementation is O(b) per operation but
    with heavy Python call overhead.
    Reference: https://kazuma8128.hatenablog.com/entry/2018/05/06/022654
    """
    def __init__(self, b):
        """
        :param b: number of bits per stored value (values in [0, 2**b)).
        """
        self.bit_size = b
        self.b_node = BinaryNode()

    def insert(self, x):
        """
        Add one occurrence of x to the multiset.
        """
        self.b_node.add_node(x, self.bit_size)

    def delete(self, x):
        """
        Remove x from the multiset.
        # NOTE(review): BinaryNode.del_node clears the whole leaf count but
        # only decrements ancestors by 1, so behavior with duplicate values
        # is inconsistent — confirm before relying on multiset deletes.
        """
        self.b_node.del_node(x, self.bit_size)

    def max_element(self):
        # Not implemented yet (placeholder).
        pass

    def min_element(self):
        # Not implemented yet (placeholder).
        pass

    def lower_bound(self, x):
        """
        Return the 1-based rank of the largest stored value <= x
        (i.e. the count of elements <= x).
        """
        return self.b_node.lower_bound(x, self.bit_size)

    def upper_bound(self, x):
        """
        Return the 1-based rank of the smallest stored value >= x
        (total count minus the number of elements >= x, plus one).
        """
        return self.b_node.num - self.b_node.upper_bound(x, self.bit_size) + 1

    def kth_element(self, k):
        """
        Return the value of the k-th smallest element (k is 1-based).
        """
        return self.b_node.kth_element(k, self.bit_size)
class BinaryNode:
    """
    Internal node of BinaryTrie; children index 0/1 correspond to the next
    bit of the stored values. ``num`` counts values stored in this subtree.
    All element positions in arguments/returns are 1-based.
    """
    def __init__(self):
        self.num = 0
        self.pointer = [None, None]

    def __add_pointer(self, x):
        # Lazily create the child for bit value x.
        self.pointer[x] = \
            BinaryNode() if self.pointer[x] is None else self.pointer[x]

    def __del_pointer(self, x):
        self.pointer[x] = None

    def add_node(self, x, b):
        """
        Insert value x, descending from bit position b (b == -1 is the leaf).

        :param x: value to insert
        :param b: current bit depth, counted down toward the leaves
        """
        if b == -1:
            self.num += 1
            return self.num
        t = x >> b & 1
        self.__add_pointer(t)
        self.pointer[t].add_node(x, b - 1)
        self.num += 1

    def del_node(self, x, b):
        """
        Delete value x, descending from bit position b.

        # NOTE(review): the leaf sets num = 0 (dropping all duplicates)
        # while each ancestor only does num -= 1, so counts go inconsistent
        # when x was stored more than once; also assumes x is present
        # (pointer[t] is never checked for None). Confirm intended contract.
        """
        if b == -1:
            self.num = 0
            return self.num
        t = x >> b & 1
        if self.pointer[t].del_node(x, b - 1) == 0:
            self.__del_pointer(t)
        self.num -= 1
        return self.num

    def upper_bound(self, x, b):
        """
        Return the number of stored values >= x in this subtree.

        :param x: query value
        :param b: current bit depth
        """
        if b == -1:
            return 1
        re = 0
        if x >> b & 1 == 1:
            # x has a 1 here: only values with a 1 bit can still be >= x.
            if self.pointer[1] is not None:
                re += self.pointer[1].upper_bound(x, b - 1)
        else:
            # x has a 0 here: the whole 1-subtree is larger; recurse on 0.
            if self.pointer[0] is not None:
                re += self.pointer[0].upper_bound(x, b - 1)
            if self.pointer[1] is not None:
                re += self.pointer[1].num
        return re

    def lower_bound(self, x, b):
        """
        Return the number of stored values <= x in this subtree.

        :param x: query value
        :param b: current bit depth
        """
        if b == -1:
            return 1
        re = 0
        if x >> b & 1 == 1:
            # x has a 1 here: the whole 0-subtree is smaller; recurse on 1.
            if self.pointer[0] is not None:
                re += self.pointer[0].num
            if self.pointer[1] is not None:
                re += self.pointer[1].lower_bound(x, b - 1)
        else:
            if self.pointer[0] is not None:
                re += self.pointer[0].lower_bound(x, b - 1)
        return re

    def kth_element(self, k, b):
        """
        Return the k-th smallest value in this subtree (k is 1-based).

        # Assumes 1 <= k <= self.num; otherwise the recursion may hit a
        # missing child.
        """
        if b == -1:
            return 0
        re = 0
        if self.pointer[0] is not None:
            if k <= self.pointer[0].num:
                re += self.pointer[0].kth_element(k, b - 1)
            else:
                # Skip the 0-subtree: set this bit and search the 1-subtree.
                re += 1 << b
                re += self.pointer[1].kth_element(k - self.pointer[0].num,
                                                  b - 1)
        else:
            re += 1 << b
            re += self.pointer[1].kth_element(k, b - 1)
        return re
| [
"39874652+corutopi@users.noreply.github.com"
] | 39874652+corutopi@users.noreply.github.com |
a037c30b4cf938b6c88e676a97e3fb1218ac58f5 | 45c01f01483b09ff738be19df6b183ec9bf38504 | /bin/combinatorial_fitness.py | 0cc08316d553e9b5db3df4f9d799942fe0fcda81 | [
"MIT"
] | permissive | brianhie/viral-mutation | 77787e74cb3868ef227aca50b13b3a4c439d4564 | 81c80d41671670eb58cc46e957a1b0c4bf14856a | refs/heads/master | 2023-04-16T18:42:34.118126 | 2022-02-16T16:22:23 | 2022-02-16T16:22:23 | 247,753,138 | 117 | 46 | MIT | 2023-03-24T22:45:39 | 2020-03-16T15:52:08 | Python | UTF-8 | Python | false | false | 8,010 | py | from utils import Counter, SeqIO
from Bio.Seq import translate
import numpy as np
def load_doud2016():
    """Load per-site amino-acid preferences for WSN/1933 (H1) HA
    (Doud & Bloom 2016 deep mutational scan).

    Returns ``({strain: wildtype_seq}, seqs_fitness)`` where
    ``seqs_fitness[(mutant_seq, strain)]`` is a one-element list of dicts
    holding all replicate preference values plus their median under both
    'fitness' and 'preference' keys (this dataset has a single measurement
    type, so the two are identical).
    """
    strain = 'h1'
    fname = 'data/influenza/escape_doud2018/WSN1933_H1_HA.fa'
    wt_seq = SeqIO.read(fname, 'fasta').seq
    seqs_fitness = {}
    fname = ('data/influenza/fitness_doud2016/'
             'Supplemental_File_2_HApreferences.txt')
    with open(fname) as f:
        # Header row: columns 4+ name the mutant amino acids ('PI_X').
        muts = f.readline().rstrip().split()[4:]
        for line in f:
            fields = line.rstrip().split()
            pos = int(fields[0]) - 1  # file is 1-indexed
            orig = fields[1]
            assert(wt_seq[pos] == orig)
            data = [ float(field) for field in fields[3:] ]
            assert(len(muts) == len(data))
            for mut, pref in zip(muts, data):
                # Build the full-length mutant by swapping one residue.
                mutable = [ aa for aa in wt_seq ]
                assert(mut.startswith('PI_'))
                mutable[pos] = mut[-1]
                mut_seq = ''.join(mutable)
                assert(len(mut_seq) == len(wt_seq))
                if (mut_seq, strain) not in seqs_fitness:
                    seqs_fitness[(mut_seq, strain)] = [ {
                        'strain': strain,
                        'fitnesses': [ pref ],
                        'preferences': [ pref ],
                        'wildtype': wt_seq,
                        'mut_pos': [ pos ],
                    } ]
                else:
                    seqs_fitness[(mut_seq, strain)][0][
                        'fitnesses'].append(pref)
                    seqs_fitness[(mut_seq, strain)][0][
                        'preferences'].append(pref)
    # Collapse replicate measurements to their median.
    for fit_key in seqs_fitness:
        seqs_fitness[fit_key][0]['fitness'] = np.median(
            seqs_fitness[fit_key][0]['fitnesses']
        )
        seqs_fitness[fit_key][0]['preference'] = np.median(
            seqs_fitness[fit_key][0]['preferences']
        )
    return { strain: wt_seq }, seqs_fitness
def load_haddox2018():
    """Load amino-acid preferences for the HIV Env proteins of strains
    BF520 and BG505 (Haddox et al. 2018).

    Returns ``(strains, seqs_fitness)`` in the same format as
    load_doud2016, with one wildtype sequence per strain.
    """
    strain_names = [ 'BF520', 'BG505' ]
    strains = {}
    seqs_fitness = {}
    for strain in strain_names:
        # Translate the nucleotide Env sequence; drop the trailing stop.
        wt_seq = translate(SeqIO.read(
            'data/hiv/fitness_haddox2018/'
            '{}_env.fasta'.format(strain), 'fasta'
        ).seq).rstrip('*')
        strains[strain] = wt_seq
        # Map HXB2 reference numbering -> (wildtype residue, 0-based index).
        fname = 'data/hiv/fitness_haddox2018/{}_to_HXB2.csv'.format(strain)
        pos_map = {}
        with open(fname) as f:
            f.readline() # Consume header.
            for line in f:
                fields = line.rstrip().split(',')
                pos_map[fields[1]] = (fields[2], int(fields[0]) - 1)
        fname = ('data/hiv/fitness_haddox2018/{}_avgprefs.csv'
                 .format(strain))
        with open(fname) as f:
            mutants = f.readline().rstrip().split(',')[1:]
            for line in f:
                fields = line.rstrip().split(',')
                orig, pos = pos_map[fields[0]]
                assert(wt_seq[int(pos)] == orig)
                preferences = [ float(field) for field in fields[1:] ]
                assert(len(mutants) == len(preferences))
                for mut, pref in zip(mutants, preferences):
                    mutable = [ aa for aa in wt_seq ]
                    mutable[pos] = mut
                    mut_seq = ''.join(mutable)
                    if (mut_seq, strain) not in seqs_fitness:
                        seqs_fitness[(mut_seq, strain)] = [ {
                            'strain': strain,
                            'fitnesses': [ pref ],
                            'preferences': [ pref ],
                            'wildtype': wt_seq,
                            'mut_pos': [ pos ],
                        } ]
                    else:
                        seqs_fitness[(mut_seq, strain)][0][
                            'fitnesses'].append(pref)
                        seqs_fitness[(mut_seq, strain)][0][
                            'preferences'].append(pref)
    # Collapse replicate measurements to their median.
    for fit_key in seqs_fitness:
        seqs_fitness[fit_key][0]['fitness'] = np.median(
            seqs_fitness[fit_key][0]['fitnesses']
        )
        seqs_fitness[fit_key][0]['preference'] = np.median(
            seqs_fitness[fit_key][0]['preferences']
        )
    return strains, seqs_fitness
def load_wu2020():
    """Load combinatorial fitness/preference data for six variable HA
    positions across six influenza strains (Wu et al. 2020).

    Returns ``(wt_seqs, seqs_fitness)``; each seqs_fitness entry lists one
    record per measurement with 'fitness' and 'preference' values.
    """
    # Variable positions in HA numbering; `offset` shifts them to 0-based
    # indices into the full sequence (16 amino acids of prefix — presumably
    # the signal peptide; TODO confirm).
    mut_pos = [
        156, 158, 159, 190, 193, 196
    ]
    offset = 16 # Amino acids in prefix.
    mut_pos = [ pos - 1 + offset for pos in mut_pos ]
    names = [
        'HK68', 'Bk79', 'Bei89', 'Mos99', 'Bris07L194', 'NDako16',
    ]
    # The wildtype residues at mut_pos, one 6-mer per strain (same order).
    wildtypes = [
        'KGSESV', 'EESENV', 'EEYENV', 'QKYDST', 'HKFDFA', 'HNSDFA',
    ]
    # Load full wildtype sequences.
    wt_seqs = {}
    fname = 'data/influenza/fitness_wu2020/wildtypes.fa'
    for record in SeqIO.parse(fname, 'fasta'):
        strain_idx = names.index(record.description)
        wt = wildtypes[strain_idx]
        for aa, pos in zip(wt, mut_pos):
            assert(record.seq[pos] == aa)
        wt_seqs[names[strain_idx]] = record.seq
    # Load mutants.
    seqs_fitness = {}
    fname = 'data/influenza/fitness_wu2020/data_pref.tsv'
    with open(fname) as f:
        f.readline()
        for line in f:
            fields = line.rstrip().split('\t')
            mut, strain, fitness, preference = fields
            # Skip the P194 variant; normalize the Bris07 strain name.
            if strain == 'Bris07P194':
                continue
            if strain == 'Bris07':
                strain = 'Bris07L194'
            # NOTE(review): `fitness` is overwritten with the preference
            # column here, so both keys carry the same value — confirm
            # whether this is intentional.
            fitness = float(preference)
            preference = float(preference)
            strain_idx = names.index(strain)
            wt = wildtypes[strain_idx]
            full_seq = wt_seqs[strain]
            # Substitute the 6-mer `mut` into the wildtype at mut_pos.
            mutable = [ aa for aa in full_seq ]
            for aa_wt, aa, pos in zip(wt, mut, mut_pos):
                assert(mutable[pos] == aa_wt)
                mutable[pos] = aa
            mut_seq = ''.join(mutable)
            if (mut_seq, strain) not in seqs_fitness:
                seqs_fitness[(mut_seq, strain)] = []
            seqs_fitness[(mut_seq, strain)].append({
                'strain': strain,
                'fitness': fitness,
                'preference': preference,
                'wildtype': full_seq,
                'mut_pos': mut_pos,
            })
    return wt_seqs, seqs_fitness
def load_starr2020():
    """Load SARS-CoV-2 RBD mutant ACE2-binding affinities (log10 Ka) from
    Starr et al. 2020, keyed by full mutant spike sequence.

    Returns ``({strain: wt_seq}, seqs_fitness)`` with the median log10Ka
    stored under both 'fitness' and 'preference'.
    """
    strain = 'sars_cov_2'
    wt_seq = SeqIO.read('data/cov/cov2_spike_wt.fasta', 'fasta').seq
    seqs_fitness = {}
    with open('data/cov/starr2020cov2/binding_Kds.csv') as f:
        f.readline()
        for line in f:
            fields = line.replace('"', '').rstrip().split(',')
            # Column 5 holds log10Ka; 'NA' marks unmeasured variants.
            if fields[5] == 'NA':
                continue
            log10Ka = float(fields[5])
            mutants = fields[-2].split()
            mutable = [ aa for aa in wt_seq ]
            mut_pos = []
            for mutant in mutants:
                # Mutant tokens look like 'A123C'; positions are RBD-local,
                # shifted by 330 into full-spike coordinates (presumably the
                # RBD start — TODO confirm against the construct).
                orig, mut = mutant[0], mutant[-1]
                pos = int(mutant[1:-1]) - 1 + 330
                assert(wt_seq[pos] == orig)
                mutable[pos] = mut
                mut_pos.append(pos)
            mut_seq = ''.join(mutable)
            if (mut_seq, strain) not in seqs_fitness:
                seqs_fitness[(mut_seq, strain)] = [ {
                    'strain': strain,
                    'fitnesses': [ log10Ka ],
                    'preferences': [ log10Ka ],
                    'wildtype': wt_seq,
                    'mut_pos': mut_pos,
                } ]
            else:
                seqs_fitness[(mut_seq, strain)][0][
                    'fitnesses'].append(log10Ka)
                seqs_fitness[(mut_seq, strain)][0][
                    'preferences'].append(log10Ka)
    # Collapse replicate measurements to their median.
    for fit_key in seqs_fitness:
        seqs_fitness[fit_key][0]['fitness'] = np.median(
            seqs_fitness[fit_key][0]['fitnesses']
        )
        seqs_fitness[fit_key][0]['preference'] = np.median(
            seqs_fitness[fit_key][0]['preferences']
        )
    print(len(seqs_fitness))
    return { strain: wt_seq }, seqs_fitness
if __name__ == '__main__':
    # Smoke-test the loaders. NOTE: exit() makes everything after it dead
    # code — the three calls below never run as written.
    load_starr2020()
    exit()
    load_doud2016()
    load_haddox2018()
    load_wu2020()
| [
"brianhie@mit.edu"
] | brianhie@mit.edu |
63c93fdc3a6a5121bd821bc2b6e51bfc13572d01 | 90af9781544352a3ae2e4f33f1c21cf4cd7a18ba | /scripts/sptk/libs/utils.py | 871478ece70e32e4d63464fecea3d8c21a3083c1 | [
"Apache-2.0"
] | permissive | ronggan/setk | 65d5a079185c69d29035a07449cfe5497c844203 | c1df07f5acb5d631ec4a6d6bdbae0507cc9c9dfc | refs/heads/master | 2020-06-14T08:44:32.387932 | 2019-06-07T12:47:52 | 2019-06-07T12:47:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,608 | py | #!/usr/bin/env python
# wujian@2018
import os
import math
import errno
import warnings
import logging
import librosa as audio_lib
# using wf to handle wave IO because it support better than librosa
import scipy.io.wavfile as wf
import scipy.signal as ss
import numpy as np
MAX_INT16 = np.iinfo(np.int16).max
EPSILON = np.finfo(np.float32).eps
__all__ = [
"stft", "istft", "get_logger", "make_dir", "filekey", "write_wav",
"read_wav"
]
def nfft(window_size):
    """Return the smallest power of two that is >= window_size."""
    return 1 << math.ceil(math.log2(window_size))
def cmat_abs(cmat):
    """Element-wise magnitude of a complex ndarray.

    Computed as sqrt(re^2 + im^2), which benchmarks roughly 2x faster than
    np.abs on large complex matrices:

        In [5]: %timeit np.abs(c)        # ~5.6 ms
        In [6]: %timeit np.sqrt(c.real**2 + c.imag**2)   # ~2.4 ms

    Raises RuntimeError when the input is not complex-typed.
    """
    if not np.iscomplexobj(cmat):
        raise RuntimeError(
            "function cmat_abs expect complex as input, but got {}".format(
                cmat.dtype))
    real_part, imag_part = cmat.real, cmat.imag
    return np.sqrt(real_part * real_part + imag_part * imag_part)
def write_wav(fname, samps, fs=16000, normalize=True):
    """
    Write mono or multi-channel audio as an int16 WAV file.

    :param fname: output path; missing parent directories are created
    :param samps: samples, either 1D or [Nchannels, Nsamples] /
        [Nsamples, Nchannels] (channel-first input is transposed)
    :param fs: sample rate in Hz
    :param normalize: scale float samples in [-1, 1] up to int16 range
        (matches MATLAB / kaldi conventions)
    """
    peak = np.iinfo(np.int16).max  # same scale used when reading back
    if normalize:
        samps = samps * peak
    # scipy.io.wavfile expects [Nsamples, Nchannels] for multi-channel
    # data, so transpose channel-first arrays.
    if samps.ndim != 1 and samps.shape[0] < samps.shape[1]:
        samps = np.transpose(samps)
    samps = np.squeeze(samps)
    out_dir = os.path.dirname(fname)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # NOTE: librosa 0.6.0 could not write non-float arrays, hence scipy.
    wf.write(fname, fs, samps.astype(np.int16))
def read_wav(fname, normalize=True, return_rate=False):
    """
    Read a WAV file via scipy.io.wavfile (supports multi-channel).

    Fix: the deprecated alias ``np.float`` was removed in NumPy 1.24 and
    raised AttributeError; ``np.float64`` is the same type it aliased.

    :param fname: input path
    :param normalize: divide by int16 max, like MATLAB / librosa
    :param return_rate: also return the sample rate
    :return: samples as float64, shaped [Nsamples] or
        [Nchannels, Nsamples] (channel axis first); optionally
        (sample_rate, samples)
    """
    # samps_int16: N x C or N (N samples, C channels)
    samp_rate, samps_int16 = wf.read(fname)
    samps = samps_int16.astype(np.float64)
    # Transpose to put the channel axis first, matching write_wav.
    if samps.ndim != 1:
        samps = np.transpose(samps)
    if normalize:
        samps = samps / np.iinfo(np.int16).max
    if return_rate:
        return samp_rate, samps
    return samps
# return F x T or T x F(tranpose=True)
def stft(samps,
         frame_len=1024,
         frame_hop=256,
         round_power_of_two=True,
         center=False,
         window="hann",
         apply_abs=False,
         apply_log=False,
         apply_pow=False,
         transpose=True):
    """
    STFT wrapper, using librosa.

    :param samps: 1D (mono) sample vector
    :param frame_len: analysis window length in samples
    :param frame_hop: hop size in samples
    :param round_power_of_two: pad FFT size up to the next power of two
    :param center: librosa centering (pad so frame t is centered at t*hop)
    :param window: window name, or "sqrthann" for a sqrt-Hann window
    :param apply_abs: return magnitudes instead of complex values
    :param apply_log: return log-magnitudes (forces apply_abs)
    :param apply_pow: square the (magnitude) spectrogram
    :param transpose: return T x F instead of F x T
    :return: complex or real spectrogram matrix
    """
    if apply_log and not apply_abs:
        warnings.warn("Ignore apply_abs=False because apply_log=True")
        apply_abs = True
    if samps.ndim != 1:
        raise RuntimeError("Invalid shape, librosa.stft accepts mono input")
    # pad fft size to power of two or left it same as frame length
    n_fft = nfft(frame_len) if round_power_of_two else frame_len
    if window == "sqrthann":
        window = ss.hann(frame_len, sym=False)**0.5
    # orignal stft accept samps(vector) and return matrix shape as F x T
    # NOTE for librosa.stft:
    # 1) win_length <= n_fft
    # 2) if win_length is None, win_length = n_fft
    # 3) if win_length < n_fft, pad window to n_fft
    stft_mat = audio_lib.stft(samps,
                              n_fft,
                              frame_hop,
                              win_length=frame_len,
                              window=window,
                              center=center)
    # stft_mat: F x T or N x F x T
    if apply_abs:
        # cmat_abs is ~2x faster than np.abs on complex input.
        stft_mat = cmat_abs(stft_mat)
    if apply_pow:
        stft_mat = np.power(stft_mat, 2)
    if apply_log:
        # Floor at EPSILON to avoid log(0).
        stft_mat = np.log(np.maximum(stft_mat, EPSILON))
    if transpose:
        stft_mat = np.transpose(stft_mat)
    return stft_mat
# accept F x T or T x F(tranpose=True)
def istft(stft_mat,
          frame_len=1024,
          frame_hop=256,
          center=False,
          window="hann",
          transpose=True,
          norm=None,
          power=None,
          nsamps=None):
    """
    iSTFT wrapper, using librosa.

    :param stft_mat: complex spectrogram, F x T (or T x F with transpose)
    :param frame_len: synthesis window length in samples
    :param frame_hop: hop size in samples
    :param center: must match the value used in the forward stft
    :param window: window name, or "sqrthann" for a sqrt-Hann window
    :param transpose: input is T x F and must be transposed first
    :param norm: if set, rescale so the output's max amplitude equals norm
    :param power: if set, rescale so the output's average power equals power
    :param nsamps: if set, trim/pad the output to exactly this many samples
    :return: 1D sample vector
    """
    if transpose:
        stft_mat = np.transpose(stft_mat)
    if window == "sqrthann":
        window = ss.hann(frame_len, sym=False)**0.5
    # orignal istft accept stft result(matrix, shape as FxT)
    samps = audio_lib.istft(stft_mat,
                            frame_hop,
                            win_length=frame_len,
                            window=window,
                            center=center,
                            length=nsamps)
    # keep same amplitude
    if norm:
        samps_norm = np.linalg.norm(samps, np.inf)
        samps = samps * norm / (samps_norm + EPSILON)
    # keep same power
    if power:
        samps_pow = np.linalg.norm(samps, 2)**2 / samps.size
        samps = samps * np.sqrt(power / samps_pow)
    return samps
def griffin_lim(magnitude,
                frame_len=1024,
                frame_hop=256,
                window="hann",
                center=True,
                transpose=True,
                epochs=100):
    """
    Griffin-Lim phase reconstruction: recover a waveform from a magnitude
    spectrogram by alternating iSTFT/STFT projections from a random phase.

    :param magnitude: magnitude spectrogram, F x T (or T x F with transpose)
    :param frame_len: window length in samples
    :param frame_hop: hop size in samples
    :param window: window name passed to librosa
    :param center: librosa centering flag (must be consistent across calls)
    :param transpose: input is T x F and must be transposed first
    :param epochs: number of refinement iterations
    :return: 1D sample vector
    """
    # TxF -> FxT
    if transpose:
        magnitude = np.transpose(magnitude)
    n_fft = nfft(frame_len)
    # Random initial phase; not seeded, so output varies between calls.
    angle = np.exp(2j * np.pi * np.random.rand(*magnitude.shape))
    samps = audio_lib.istft(magnitude * angle,
                            frame_hop,
                            frame_len,
                            window=window,
                            center=center)
    for _ in range(epochs):
        # Re-estimate phase from the current waveform, keep the target
        # magnitude, and resynthesize.
        stft_mat = audio_lib.stft(samps,
                                  n_fft,
                                  frame_hop,
                                  frame_len,
                                  window=window,
                                  center=center)
        angle = np.exp(1j * np.angle(stft_mat))
        samps = audio_lib.istft(magnitude * angle,
                                frame_hop,
                                frame_len,
                                window=window,
                                center=center)
    return samps
def filekey(path):
    """
    Derive a unique key from a path: the base name with its last
    extension removed. Raises ValueError for directory-like paths
    (empty base name).
    """
    base = os.path.basename(path)
    if not base:
        raise ValueError("{}(Is directory path?)".format(path))
    if "." not in base:
        return base
    return base.rsplit(".", 1)[0]
def get_logger(
        name,
        format_str="%(asctime)s [%(pathname)s:%(lineno)s - %(levelname)s ] %(message)s",
        date_format="%Y-%m-%d %H:%M:%S",
        file=False):
    """
    Get an INFO-level logger instance with a freshly attached handler.

    :param name: logger name; doubles as the log file path when file=True
    :param format_str: logging.Formatter format string
    :param date_format: timestamp format
    :param file: write to a file named `name` instead of the console
    NOTE: each call attaches another handler to the same named logger.
    """
    formatter = logging.Formatter(fmt=format_str, datefmt=date_format)
    handler = logging.FileHandler(name) if file else logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    return logger
def make_dir(fdir):
    """
    Create directory *fdir* (with parents), ignoring falsy or existing paths.
    """
    if not fdir or os.path.exists(fdir):
        return
    try:
        os.makedirs(fdir)
    except OSError as err:
        # Losing a creation race (directory appeared meanwhile) is fine;
        # anything else is surfaced as a RuntimeError, as before.
        if err.errno != errno.EEXIST:
            raise RuntimeError("Error exists when mkdir -p {}".format(fdir))
"funcwj@foxmail.com"
] | funcwj@foxmail.com |
7d7a1006b9e7cc31093802d2443ca503d2227810 | bd542286bccf42a61697c97e3eed86508ddab9c4 | /CarManagement/Car/migrations/0026_carimage.py | 8365a66ae44c74a2155a5fdf0763a574a83f7a55 | [] | no_license | mahadi-interconnection/DjangoReactCarManagement | 031929e9752db83fb0059be666f1acd8c64a1c03 | cbf2d04207b73956e5942894d78651a6dc22902e | refs/heads/master | 2020-06-03T19:43:34.984121 | 2019-06-09T17:38:58 | 2019-06-09T17:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Generated by Django 2.0.5 on 2019-05-16 03:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the CarImage model,
    # which stores an image per Car (many images per car via the FK).
    dependencies = [
        ('Car', '0025_auto_20190513_1633'),
    ]
    operations = [
        migrations.CreateModel(
            name='CarImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(default='default.jpg', upload_to='')),
                # NOTE(review): default=1 assumes a Car with pk=1 exists — confirm.
                ('car', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='Car.Car')),
            ],
        ),
    ]
| [
"streametanvir@gmail.com"
] | streametanvir@gmail.com |
ff524307c8551c7850b38755517425b7c3d311eb | be18547cef4591a551321a8c78cb6b28aafa3f0d | /pumpp/feature/fft.py | 7b7ea9255de43ed713bd5b208c64976dbbaf99cd | [
"ISC"
] | permissive | Manojkl/pumpp | f1f2a476a95af548f2096c8834ba308a6e3892bc | 18d3d843d5e5e505888057fed20e58e545f4baaa | refs/heads/master | 2022-11-29T01:40:47.159380 | 2019-08-21T12:20:31 | 2019-08-21T12:20:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | #!/usr/bin/env python
"""STFT feature extractors"""
import numpy as np
from librosa import stft, magphase
from librosa import amplitude_to_db, get_duration
from librosa.util import fix_length
from .base import FeatureExtractor
from ._utils import phase_diff, to_dtype
__all__ = ['STFT', 'STFTMag', 'STFTPhaseDiff']
class STFT(FeatureExtractor):
    '''Short-time Fourier Transform (STFT) with both magnitude
    and phase.
    Attributes
    ----------
    name : str
        The name of this transformer
    sr : number > 0
        The sampling rate of audio
    hop_length : int > 0
        The hop length of STFT frames
    n_fft : int > 0
        The number of FFT bins per frame
    log : bool
        If `True`, scale magnitude in decibels.
        Otherwise use linear magnitude.
    conv : str
        Convolution mode
    dtype : np.dtype
        The data type for the output features. Default is `float32`.
        Setting to `uint8` will produce quantized features.
    See Also
    --------
    STFTMag
    STFTPhaseDiff
    '''
    def __init__(self, name, sr, hop_length, n_fft, log=False, conv=None, dtype='float32'):
        super(STFT, self).__init__(name, sr, hop_length, conv=conv, dtype=dtype)
        self.n_fft = n_fft
        self.log = log
        # A real FFT of n_fft samples yields 1 + n_fft//2 frequency bins.
        self.register('mag', 1 + n_fft // 2, self.dtype)
        self.register('phase', 1 + n_fft // 2, self.dtype)
    def transform_audio(self, y):
        '''Compute the STFT magnitude and phase.
        Parameters
        ----------
        y : np.ndarray
            The audio buffer
        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude
            data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
        D = stft(y, hop_length=self.hop_length,
                 n_fft=self.n_fft)
        # Pad/truncate so the frame count matches the duration-derived size.
        D = fix_length(D, n_frames)
        mag, phase = magphase(D)
        if self.log:
            mag = amplitude_to_db(mag, ref=np.max)
        # Transpose FxT -> TxF; self.idx comes from the base extractor.
        return {'mag': to_dtype(mag.T[self.idx], self.dtype),
                'phase': to_dtype(np.angle(phase.T)[self.idx], self.dtype)}
class STFTPhaseDiff(STFT):
    '''STFT with phase differentials
    See Also
    --------
    STFT
    '''
    def __init__(self, *args, **kwargs):
        super(STFTPhaseDiff, self).__init__(*args, **kwargs)
        # Replace the parent's 'phase' field with a phase-differential
        # field of the same dtype.
        phase_field = self.pop('phase')
        self.register('dphase', 1 + self.n_fft // 2, phase_field.dtype)
    def transform_audio(self, y):
        '''Compute the STFT magnitude and phase differential.
        Parameters
        ----------
        y : np.ndarray
            The audio buffer
        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude
            data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
        D = stft(y, hop_length=self.hop_length,
                 n_fft=self.n_fft)
        D = fix_length(D, n_frames)
        mag, phase = magphase(D)
        if self.log:
            mag = amplitude_to_db(mag, ref=np.max)
        # Frame-to-frame phase difference (helper from ._utils).
        phase = phase_diff(np.angle(phase.T)[self.idx], self.conv)
        return {'mag': to_dtype(mag.T[self.idx], self.dtype),
                'dphase': to_dtype(phase, self.dtype)}
class STFTMag(STFT):
    '''Magnitude-only variant of the STFT extractor.
    See Also
    --------
    STFT
    '''
    def __init__(self, *args, **kwargs):
        super(STFTMag, self).__init__(*args, **kwargs)
        # The parent registered a 'phase' field; this extractor drops it.
        self.pop('phase')
    def transform_audio(self, y):
        '''Compute the STFT magnitude.
        Parameters
        ----------
        y : np.ndarray
            The audio buffer
        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The STFT magnitude
        '''
        features = super(STFTMag, self).transform_audio(y)
        del features['phase']
        return features
| [
"brian.mcfee@nyu.edu"
] | brian.mcfee@nyu.edu |
332501bffec57d4ff4b2b4cca896d08b65b8d3ba | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/update_instance_option.py | 4de1ea36bdbe9710839a0c8d412c9b2d57353a81 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateInstanceOption:
    # Auto-generated SDK model ("a model defined in huaweicloud sdk");
    # keep the structure in sync with the generator's template.
    """
    Attributes:
    openapi_types (dict): The key is attribute name
    and the value is attribute type.
    attribute_map (dict): The key is attribute name
    and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'name': 'str',
        'description': 'str'
    }
    attribute_map = {
        'name': 'name',
        'description': 'description'
    }
    def __init__(self, name=None, description=None):
        """UpdateInstanceOption - a model defined in huaweicloud sdk"""
        self._name = None
        self._description = None
        self.discriminator = None
        # Only assign through the property setters when a value was given.
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
    @property
    def name(self):
        """Gets the name of this UpdateInstanceOption.
        The updated edge-instance name; may only contain Chinese characters, English letters, digits, and "_", "-", ".".
        :return: The name of this UpdateInstanceOption.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this UpdateInstanceOption.
        The updated edge-instance name; may only contain Chinese characters, English letters, digits, and "_", "-", ".".
        :param name: The name of this UpdateInstanceOption.
        :type: str
        """
        self._name = name
    @property
    def description(self):
        """Gets the description of this UpdateInstanceOption.
        Description; must not contain "<" or ">".
        :return: The description of this UpdateInstanceOption.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this UpdateInstanceOption.
        Description; must not contain "<" or ">".
        :param description: The description of this UpdateInstanceOption.
        :type: str
        """
        self._description = description
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes listed as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateInstanceOption):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
57970b46f486cd918e78eb5d99b794e00dedb0cf | 67430c0de6ba62ff6faca80e5de5d9fedf45e2f1 | /to_mrp_backdate/models/mrp_workcenter_productivity.py | fa89c4bf74daae418dbca47f64ff51ea53ce1ea6 | [] | no_license | blue-connect/inl_extra_addons_oe13 | 2bc62d1eeeff3a450a0891f37aca614bca7050bd | 58144a02ce00abd3cf86dd3b84dfae8163eb6d26 | refs/heads/master | 2022-12-24T04:15:46.991096 | 2020-09-29T10:38:14 | 2020-09-29T10:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from odoo import api, fields, models
class MrpWorkcenterProductivity(models.Model):
    _inherit = 'mrp.workcenter.productivity'
    # Optional manual timestamp; when set, it is injected into the context
    # so validation uses it instead of "now".
    backdate = fields.Datetime(string='Backdate', help="If filled, this date and time will be used instead"
                               " of the current date and time")
    def button_block(self):
        """Block the workcenter, honouring the manual backdate when given."""
        self.ensure_one()
        record = self.with_context(manual_validate_date_time=self.backdate) if self.backdate else self
        super(MrpWorkcenterProductivity, record).button_block()
| [
"kikin.kusumah@gmail.com"
] | kikin.kusumah@gmail.com |
377e861bde090e01e900aa2779b55f2f6fb308cd | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /未完成题目/1501-1550/1538/1538_Python_1.py | c467812968377363498155d449155bd2f2f4ffd6 | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # """
# This is the ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
class ArrayReader(object):
    # Judge-provided API stub (LeetCode 1538); the platform supplies the
    # real implementation, so both methods are intentionally empty.
    # Compares the sum of arr[l..r] with the sum of arr[x..y]
    # return 1 if sum(arr[l..r]) > sum(arr[x..y])
    # return 0 if sum(arr[l..r]) == sum(arr[x..y])
    # return -1 if sum(arr[l..r]) < sum(arr[x..y])
    def compareSub(self, l: int, r: int, x: int, y: int) -> int:
        pass
    # Returns the length of the array
    def length(self) -> int:
        pass
class Solution:
    def guessMajority(self, reader: 'ArrayReader') -> int:
        # TODO: unsolved skeleton — should locate the index of a majority
        # element using only reader.compareSub / reader.length queries.
        pass
if __name__ == "__main__":
    pass  # no local driver: the problem is interactive-judge only
| [
"1278729001@qq.com"
] | 1278729001@qq.com |
d79939e1962f8f6b1da5021ec40c097fad37386b | 5352ad5f07ae81c6406d20c018f68d29788b2290 | /exps/standalone/diff_sims/mem_ops.py | 054efdf780b2cd1d8b08a13f850125a4a23530b4 | [
"Apache-2.0"
] | permissive | sillywalk/GRSan | 9b23c95c272fa06fcaaec4fee33e22eb523a9319 | a0adb1a90d41ff9006d8c1476546263f728b3c83 | refs/heads/master | 2020-06-08T20:53:24.667084 | 2019-06-27T14:56:40 | 2019-06-27T14:56:40 | 193,304,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,735 | py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
def pa(lb, arr):
    """Print a label and a bracketed array, each element as 2-digit sci notation."""
    cells = ''.join('{:0.2E} '.format(v) for v in arr)
    print('{}: [{}]'.format(lb, cells))
def pi(lb, *ints):
    """Print a label followed by each value in 2-digit scientific notation."""
    formatted = ''.join('{:0.2E} '.format(v) for v in ints)
    print('{}: {}'.format(lb, formatted))
def weighted_avg(arr):
    """Average of *arr* using normalized Hamming-window weights."""
    w = np.hamming(len(arr))
    return np.dot(np.asarray(arr), w / np.sum(w))
def get_gauss(sigma):
    """Return f(x_j, i, j): x_j scaled by a Gaussian of (i - j) with width sigma."""
    norm = np.sqrt(2 * np.pi * sigma ** 2)

    def gaussian(x_j, i, j):
        return x_j / norm * np.exp(-(i - j) ** 2 / (2 * sigma ** 2))
    return gaussian
def get_d_gauss_di(sigma):
    """Return the partial derivative of the Gaussian kernel with respect to i."""
    norm = np.sqrt(2 * np.pi * sigma ** 6)

    def d_gauss_di(x_j, i, j):
        return -x_j / norm * np.exp(-(i - j) ** 2 / (2 * sigma ** 2)) * (i - j)
    return d_gauss_di
def get_d_gauss_dj(sigma):
    """Gaussian-kernel derivative with respect to j.

    NOTE(review): this body is identical (including the leading minus) to
    get_d_gauss_di above, and the name is redefined later in this module
    with the opposite sign — the later definition wins at import time.
    Confirm which sign is intended before relying on either.
    """
    def d_gauss_dj(x_j, i, j):
        return - x_j / (np.sqrt(2*np.pi*sigma**6)) *\
            np.exp(-(i - j)**2/(2*sigma**2)) *\
            (i - j)
    return d_gauss_dj
def abs_exp(x_j, i, j):
    """Laplacian-style kernel: x_j attenuated by exp(-|i - j|)."""
    distance = np.abs(i - j)
    return x_j * np.exp(-distance)
def d_abs_exp(x_j, i, j):
    """Derivative of abs_exp w.r.t. i; undefined (0/0) at i == j, as before."""
    diff = i - j
    return -x_j * np.exp(-np.abs(diff)) * diff / np.abs(diff)
def sim_read(A, i, filt, use_log=False):
    """Differentiable 'read' of memory A at (possibly fractional) positions i.

    Every cell j contributes filt(A[j], i, j) to the result, so the read
    is a smooth function of the read position.

    Parameters
    ----------
    A : array-like
        Memory contents.
    i : np.ndarray
        Read positions (may be fractional).
    filt : callable
        Kernel filt(x_j, i, j) spreading cell j's value onto positions i.
    use_log : bool
        If True, read log-compressed values; a small offset proportional
        to max(A) avoids log(0).
    """
    if use_log:
        # Fix: removed a stray debug print of the epsilon offset that
        # polluted stdout on every log-scaled read.
        A = np.log(A + max(A) * 0.000001)
    y = np.zeros(len(i))
    for j in range(len(A)):
        y += filt(A[j], i, j)
    return y
def dyidi(A, i, d_filt = get_d_gauss_di(1.0), use_log=False):
    """Analytic derivative of a smoothed read of A with respect to position i.

    Sums d_filt(A[j], i, j) over all cells j. Works for both scalar and
    array-valued i.

    NOTE(review): ``getattr(i, "len", False)`` is always False — arrays
    expose ``__len__``, not a ``len`` attribute — so the first branch is
    dead; presumably ``hasattr(i, "__len__")`` was intended (the scalar
    branch broadcasts correctly anyway). Also note the default ``d_filt``
    is evaluated once at import time. TODO confirm.
    """
    if (use_log):
        A = np.log(A + max(A)*0.000001)
    if (getattr(i, "len", False)):
        dy = np.zeros(len(i))
        for j in range(len(A)):
            dy += d_filt(A[j], i, j)
        return dy
    # if (type(i) == int):
    else:
        # dy starts as a scalar; adding array-valued kernel outputs
        # broadcasts it up to the right shape.
        dy = 0.0
        for j in range(len(A)):
            dy += d_filt(A[j], i, j)
        return dy
def dread(A, ind, didxin, v=False):
    """Finite-difference estimate of d(read)/d(index) at A[ind].

    Probes the memory one scaled step (didxin) to each side of ``ind``
    and combines the two slopes with weighted_avg.

    NOTE(review): ``v`` is accepted but unused here; out-of-bounds probes
    wrap modulo len(A), per the CRC-specific assumption below.
    """
    samples = [-1, 1]
    N = len(A)
    dydis = []
    for i, s in enumerate(samples):
        if (didxin != 0):
            # ASSUMPTION: wrap mem if beyond bounds (specific to crc)
            xs = int(ind + s*didxin)
            # valid index in 0:N
            modxs = xs % N
            # valid dind within -N:N
            boundedxs = modxs
            if (xs < 0):
                boundedxs -= N
            dydis.append((int(A[modxs]) - int(A[ind]))/(s*boundedxs))
        else:
            # Zero step: no information, contribute a zero slope.
            dydis.append(0)
    dydi = weighted_avg(dydis)
    return dydi
def dreadxin(A, ind, didxin, v=False):
    """Finite-difference estimate of d(read)/d(input index offset).

    Like dread, but divides only by the probe direction ``s`` rather than
    the bounded offset.

    NOTE(review): ``v`` is accepted but unused; see the inline FIX note —
    the derivative is taken w.r.t. the perturbed index rather than the
    original input, which the author flags as inconsistent.
    """
    samples = [-1, 1]
    N = len(A)
    dydis = []
    for i, s in enumerate(samples):
        if (didxin != 0):
            # ASSUMPTION: wrap mem if beyond bounds (specific to crc)
            xs = int(ind + s*didxin)
            xs = xs % N
            # FIX: need to set s*didxin so derivative is wrt input not xin
            # currently inconsistent with other diff ops
            dydis.append((int(A[xs]) - int(A[ind]))/s)
        else:
            dydis.append(0)
    dydi = weighted_avg(dydis)
    # pi('DREAD: dydi didxin', dydi, didxin)
    return dydi
def dreadxin_sim(A, ind, xin_bytes, v=False):
    """Estimate d(read)/d(input) for a two-byte CRC input by simulation.

    Re-runs a CRC-style table lookup with the 2-byte input perturbed by
    +/-256 (i.e. the high byte by +/-1) and averages the two slopes of
    the table output around A[ind].
    """
    def sim_crc_read2(byte_arr):
        # Simplified CRC update loop over the input bytes; returns the
        # final table index (v3) and table value (v4).
        value = 0xffffffff
        for b in byte_arr:
            v1 = b
            v2 = v1 ^ value
            v3 = v2 & 0xff
            v4 = A[v3]
            v5 = value >> 8
            value = v4 ^ v5
        # value = table[(ord(ch) ^ value) & 0xff] ^ (value >> 8)
        return v3, v4
    # sim with xin0-1, xin0+1
    xins1 = int.to_bytes(int.from_bytes(xin_bytes, 'big') - 256, 2, 'big')
    xins2 = int.to_bytes(int.from_bytes(xin_bytes, 'big') + 256, 2, 'big')
    simx1, simy1 = sim_crc_read2(xins1)
    simx2, simy2 = sim_crc_read2(xins2)
    if v:
        pi('sr1', simx1, simy1)
        pi('sr2', simx2, simy2)
    x = ind
    y = A[ind]
    # Backward and forward difference quotients around the nominal read.
    dydi1 = (int(simy1) - int(y))/-1
    dydi2 = (int(simy2) - int(y))/1
    dydi = (dydi1 + dydi2) / 2
    if v:
        print()
    return dydi
def viz_read(A, filt, d_filt):
    """Plot memory contents with the smoothed-read curve (left) and dydi (right).

    Left panel: log-scaled memory bars overlaid with sim_read's smooth
    approximation sampled at 25 points per cell. Right panel: the
    analytic derivative from dyidi.
    """
    plt.figure(figsize=(16,4))
    plt.subplot(1,2,1)
    plt.bar(np.arange(len(A)), np.log(A+max(A)*0.000001), width=0.25)
    plt.axhline(linewidth=1,color='gray')
    plt.title('Memory & Approximation:')
    all_i = np.linspace(0, len(A)-1, len(A)*25)
    y = sim_read(A, all_i, filt)
    plt.plot(all_i, y, 'r', linewidth=2)
    plt.xlabel('memory index')
    plt.subplot(1,2,2)
    plt.plot(all_i, dyidi(A, all_i, d_filt),
             linewidth=2)
    plt.axhline(linewidth=1,color='gray')
    plt.title('dydi')
    plt.xlabel('i')
    plt.ylabel('dydi')
def get_d_gauss_dj(sigma):
    """Return the Gaussian kernel's partial derivative with respect to j.

    NOTE(review): this overrides an earlier definition of the same name
    in this module that differs only in sign.
    """
    norm = np.sqrt(2 * np.pi * sigma ** 6)

    def d_gauss_dj(x_j, i, j):
        return x_j / norm * np.exp(-(i - j) ** 2 / (2 * sigma ** 2)) * (i - j)
    return d_gauss_dj
def sim_write(x_j, i, j, filt):
    """Smeared 'write': spread value x_j at target i onto cells j via filt."""
    return filt(x_j, i, j)
def dyidj(x_j, i, j, d_filt):
    """Gradient of a simulated write at i with respect to source cell j.

    The stored value is log-compressed before being handed to the
    derivative kernel d_filt.
    """
    return d_filt(np.log(x_j), i, j)
def viz_write(A, i, j, filt, d_filt):
    """Plot memory contents with the smeared-write curve (left) and dyidj (right).

    Left panel: raw memory bars overlaid with sim_write's spread of cell
    j's value across 25 samples per cell. Right panel: the analytic
    derivative from dyidj.
    """
    plt.figure(figsize=(16,4))
    plt.subplot(1,2,1)
    plt.bar(np.arange(len(A)), A, width=0.25)
    plt.axhline(linewidth=1,color='gray')
    plt.title('Memory & Approximation:')
    all_j = np.linspace(0, len(A)-1, len(A)*25)
    y = sim_write(A[j], i, all_j, filt)
    plt.plot(all_j, y, 'r', linewidth=2)
    plt.xlabel('memory index')
    plt.subplot(1,2,2)
    plt.plot(all_j, dyidj(A[j], i, all_j, d_filt),
             linewidth=2)
    plt.axhline(linewidth=1,color='gray')
    plt.title('dyidj')
    plt.xlabel('j')
    plt.ylabel('dyidj')
| [
"i.m.ralk@gmail.com"
] | i.m.ralk@gmail.com |
c5d8e725634e1fb6e5a5fb1a9721ca6045aad126 | 24cee07743790afde5040c38ef95bb940451e2f6 | /acode/abc305/d/ans.py | 31437f4bd7bf9de4c38755d415269925e0b3c403 | [] | no_license | tinaba96/coding | fe903fb8740d115cf5a7f4ff5af73c7d16b9bce1 | d999bf5620e52fabce4e564c73b9f186e493b070 | refs/heads/master | 2023-09-01T02:24:33.476364 | 2023-08-30T15:01:47 | 2023-08-30T15:01:47 | 227,594,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from bisect import *
n = int(input())
# Sentinel beyond any coordinate so bisect never runs off the end.
A = list(map(int, input().split())) + [10**9+1]
# RA[k]: accumulated covered length up to A[k]; segments [A[2m], A[2m+1]]
# count toward the total, gaps do not.
RA = [0]
for i, ai in enumerate(A):
    if i == 0: continue
    if i%2==0:
        RA.append(RA[-1] + A[i] - A[i-1])
    else:
        RA.append(RA[-1])
# print(RA)
def solv(r):
    """Covered length in [A[0], r]: prefix sum plus a partial segment."""
    rp = bisect_right(A, r)
    # print(rp, RA[rp-1], A[rp-1], r)
    if rp%2 == 0:
        # r falls inside a covered segment; add the partial piece.
        ret = RA[rp-1] + (r - A[rp-1])
    else:
        ret = RA[rp-1]
    return ret
q = int(input())
for _ in range(q):
    l, r = map(int, input().split())
    # Covered length within [l, r] is the difference of prefix covers.
    ret = solv(r) - solv(l)
    print(ret)
| [
"tinaba178.96@gmail.com"
] | tinaba178.96@gmail.com |
73a07f3af522e0ba840c37e388931825422ec3c0 | 08760dda1de398381f639ac82f70bd97a22288dc | /dicts/954_array_of_doubled_pairs.py | 1ece687e836f7062590851d0a6490593de01ef8f | [] | no_license | liketheflower/CSCI13200 | 9a8719f6ecb1295cee22bd8a4abd9556594a0d14 | 18053e4c2513ad22d26d7b4c0528b34494c0ed8b | refs/heads/master | 2020-07-11T18:37:45.797196 | 2019-12-10T17:25:58 | 2019-12-10T17:25:58 | 204,616,708 | 7 | 5 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | class Solution:
def canReorderDoubled(self, A: List[int]) -> bool:
neg, zero, pos = [], [], []
for a in A:
if a > 0:pos.append(a)
elif a<0:neg.append(-a)
else:zero.append(a)
if len(zero)%2!=0:return False
def check(a):
if not a:return True
if len(a)%2!=0:return False
cnt = collections.Counter(a)
for k in sorted(cnt.keys()):
if cnt[k]>0:
cnt[2*k] -= cnt[k]
if cnt[2*k]<0:return False
return True
return check(neg) and check(pos)
| [
"jim.morris.shen@gmail.com"
] | jim.morris.shen@gmail.com |
be57d8fe06e399091726d07a5f4e7a5ebfa57736 | 6aab2d11b3ab7619ee26319886dcfc771cbcaba5 | /0x11-python-network_1/4-hbtn_status.py | 74b861309d0771e6458caf2b759db25339833d7e | [] | no_license | IhebChatti/holbertonschool-higher_level_programming | ef592f25eb077e182a0295cb5f2f7d69c7a8ab67 | ca58262c6f82f98b2022344818e20d382cf82592 | refs/heads/master | 2022-12-18T10:06:30.443550 | 2020-09-24T17:31:30 | 2020-09-24T17:31:30 | 259,174,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/python3
"""[sumPython script that fetches
https://intranet.hbtn.io/statusmary]
"""
import requests
if __name__ == "__main__":
url = "https://intranet.hbtn.io/status"
req = requests.get(url)
req = req.text
print("Body response:")
print("\t- type: {}".format(type(req)))
print("\t- content: {}".format(req))
| [
"iheb.chatti@holbertonschool.com"
] | iheb.chatti@holbertonschool.com |
5047cb53e99c130ffc9ab800fd5d2f469be741cc | 0f4cacd40260137d3d0b3d1b34be58ac76fc8bd0 | /2016/advent2.py | 0d31b60fbf66e6737409d47930d2a72fe04da333 | [] | no_license | timrprobocom/advent-of-code | 45bc765e6ee84e8d015543b1f2fa3003c830e60e | dc4d8955f71a92f7e9c92a36caeb954c208c50e7 | refs/heads/master | 2023-01-06T07:19:03.509467 | 2022-12-27T18:28:30 | 2022-12-27T18:28:30 | 161,268,871 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | import sys
moves = """ULL
RRDDD
LURDL
UUUUD""".splitlines()
x,y = (2,2)
pad = ( ' ', ' 123 ', ' 456 ', ' 789 ', ' ' )
moves = open('../Downloads/day2.txt').readlines()
x,y = (4,4)
pad = (
' ',
' 1 ',
' 234 ',
' 56789 ',
' ABC ',
' D ',
' ')
for ln in moves:
for c in ln.strip():
nx,ny = x,y
if c=='R': nx += 1
elif c=='U': ny -= 1
elif c=='L': nx -= 1
elif c=='D': ny += 1
if pad[ny][nx] != ' ':
x,y=nx,ny
print pad[y][x],
| [
"timr@probo.com"
] | timr@probo.com |
3cda652059bf909ad4e252db11faa152f6204318 | bf06bf980ef359615604d53567d1cc435a980b78 | /data/HW3/hw3_306.py | 1d990ae03ca4494d616153c28f0b26f761a9e0c2 | [] | no_license | am3030/IPT | dd22f5e104daa07a437efdf71fb58f55bcaf82d7 | 6851c19b2f25397f5d4079f66dbd19ba982245c5 | refs/heads/master | 2021-01-23T05:03:53.777868 | 2017-03-09T18:10:36 | 2017-03-09T18:10:36 | 86,270,526 | 0 | 0 | null | 2017-03-26T22:53:42 | 2017-03-26T22:53:42 | null | UTF-8 | Python | false | false | 958 | py |
KELVIN_FREEZING_POINT = 273.16
KELVIN_BOILING_POINT = 373.16
CELSIUS_FREEZING_POINT = 0
CELSIUS_BOILING_POINT = 100


def main():
    """Read a temperature and scale, then report water's phase at that point."""
    temp = float(input("Please enter the temperature: "))
    scale = input("Please enter 'C' for Celsius, or 'K' for Kelvin: ")
    thresholds = {
        "C": (CELSIUS_FREEZING_POINT, CELSIUS_BOILING_POINT),
        "K": (KELVIN_FREEZING_POINT, KELVIN_BOILING_POINT),
    }
    # Unrecognized scales fall through silently, as in the original.
    if scale in thresholds:
        freezing, boiling = thresholds[scale]
        if temp <= freezing:
            print("At this temperature, water is a (frozen) solid.")
        elif temp < boiling:
            print("At this temperature, water is a liquid.")
        else:
            print("At this temperature, water is a gas.")
# Script entry point: runs on import as well (no __main__ guard).
main()
| [
"mneary1@umbc.edu"
] | mneary1@umbc.edu |
51b96d33a052a040280d116a4ef0520fd9628657 | 48a522b031d45193985ba71e313e8560d9b191f1 | /baekjoon/python/26562.py | 4dd8cc5efe23ccc1c2c30ecf7fec7171d06e1bcd | [] | no_license | dydwnsekd/coding_test | beabda0d0aeec3256e513e9e0d23b43debff7fb3 | 4b2b4878408558239bae7146bb4f37888cd5b556 | refs/heads/master | 2023-09-04T12:37:03.540461 | 2023-09-03T15:58:33 | 2023-09-03T15:58:33 | 162,253,096 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import sys
money_dict = {"Franklin": 100, "Grant": 50, "Jackson": 20, "Hamilton": 10, "Lincoln": 5, "Washington": 1}
cases = int(sys.stdin.readline())
for _ in range(cases):
total_money = 0
wallet = list(sys.stdin.readline().split())
for w in wallet:
total_money += money_dict[w]
print(f"${total_money}")
| [
"dydwnsekd123@gmail.com"
] | dydwnsekd123@gmail.com |
d2a79e2201a7b60db1dd4df3cd2de184d10141d6 | 5f845ebbc2c9b40eea702833c91928ae90ae7ee5 | /algorithms/manasa-and-stones.py | 935737c7d38b736bd795f52b6dfb7b60373024ed | [
"MIT"
] | permissive | imgeekabhi/HackerRank | 7a1917fee5af01976aebb9c82aa1045a36487016 | 7fe4a308abad85ce446a28328324be480672e6fc | refs/heads/master | 2022-12-28T19:13:49.098090 | 2020-10-11T09:29:08 | 2020-10-11T09:29:08 | 300,023,395 | 1 | 0 | MIT | 2020-09-30T18:48:12 | 2020-09-30T18:48:11 | null | UTF-8 | Python | false | false | 379 | py | #!/bin/python3
import sys
def stones(n, a, b):
    """All possible final-stone values after n stones with steps a or b, ascending.

    With k steps of the larger increment (k = 0..n-1), the final value is
    (n-1)*min(a, b) + k*|a - b|; duplicates collapse when a == b.
    """
    base = (n - 1) * min(a, b)
    step = abs(a - b)
    return sorted({base + k * step for k in range(n)})
if __name__ == "__main__":
T = int(input().strip())
for a0 in range(T):
n = int(input().strip())
a = int(input().strip())
b = int(input().strip())
result = stones(n, a, b)
print (" ".join(map(str, result)))
| [
"sergey.n.nemov@gmail.com"
] | sergey.n.nemov@gmail.com |
dce8e6165e091fab69df93ff3e8300d17303dba0 | da0fa8d4fdee6f8d5b52723d45e46f9cc0e55866 | /publ/cli.py | b3b3247b69dd6d2c3fb39bdc65a4e36c66e47ade | [
"MIT"
] | permissive | PlaidWeb/Publ | 3f62ceb490a29a639314dc792552d06d511012f4 | 2cc227ec975529a89eec105f63e4102b62eeddbe | refs/heads/main | 2023-08-18T08:41:48.478369 | 2023-08-08T17:38:17 | 2023-08-08T17:38:17 | 127,061,540 | 33 | 5 | MIT | 2023-09-01T20:30:18 | 2018-03-28T00:30:42 | Python | UTF-8 | Python | false | false | 5,831 | py | """ CLI utilities for Publ """
# pylint:disable=too-many-arguments
import itertools
import logging
import os.path
import re
import time
import arrow
import click
import slugify
from flask.cli import AppGroup, with_appcontext
from pony import orm
from . import queries
from .config import config
# Module-level logger and the `flask publ` command group.
LOGGER = logging.getLogger(__name__)
publ_cli = AppGroup('publ', short_help="Publ-specific commands")  # pylint:disable=invalid-name
@publ_cli.command('reindex', short_help="Reindex the content store")
@click.option('--quietly', '-q', 'quietly', is_flag=True, help="Quietly")
@click.option('--fresh', '-f', 'fresh', is_flag=True, help="Start with a fresh database")
@with_appcontext
def reindex_command(quietly, fresh):
    """ Forces a reindex of the content store.
    This is particularly useful to ensure that all content has been indexed
    before performing another action, such as sending out notifications.
    """
    # Deferred import (presumably to avoid import cycles — confirm).
    from . import index, model
    if fresh:
        model.reset()
    spinner = itertools.cycle('|/-\\')
    index.scan_index(config.content_folder, False)
    # Poll until the background indexer drains, animating a spinner.
    while index.in_progress():
        if not quietly:
            qlen = index.queue_size() or ''
            print(f"\rIndexing... {next(spinner)} {qlen} ", end='', flush=True)
        time.sleep(0.1)
    if not quietly:
        print("Done")
@publ_cli.command('token', short_help="Generate a bearer token")
@click.argument('identity')
@click.option('--scope', '-s', help="The token's permission scope")
@click.option('--lifetime', '-l', help="The token's lifetime (in seconds)", default=3600)
@with_appcontext
def token_command(identity, scope, lifetime):
    """ Generates a bearer token for use with external applications. """
    # Deferred import, matching the other commands in this module.
    from . import tokens

    token = tokens.get_token(identity, int(lifetime), scope)
    print(token)
@publ_cli.command('normalize', short_help="Normalize entry filenames")
@click.argument('category', nargs=-1)
@click.option('--recurse', '-r', 'recurse', is_flag=True,
              help="Include subdirectories")
@click.option('--all', '-a', 'all_entries', is_flag=True,
              help="Apply to all entries, not just reachable ones")
@click.option('--dry-run', '-n', 'dry_run', is_flag=True,
              help="Show, but don't apply, changes")
@click.option('--format', '-f', 'format_str',
              help="Filename format to use",
              default="{date} {sid} {title}")
@click.option('--verbose', '-v', 'verbose', is_flag=True,
              help="Show detailed actions")
@with_appcontext
@orm.db_session
def normalize_command(category, recurse, dry_run, format_str, verbose, all_entries):
    """ Normalizes the filenames of content files based on a standardized format.
    This will only normalize entries which are already in the content index.
    If no categories are specified, it defaults to the root category. To include
    the root category in a list of other categories, use an empty string parameter,
    e.g.:
    flask publ normalize '' blog
    Available tokens for --format/-f:
    {date} The entry's publish date, in YYYYMMDD format
    {time} The entry's publish time, in HHMMSS format
    {id} The entry's ID
    {status} The entry's publish status
    {sid} If the entry is reachable, the ID, otherwise the status
    {title} The entry's title, normalized to filename-safe characters
    {slug} The entry's slug text
    {type} The entry's type
    """
    # pylint:disable=too-many-locals
    from .model import PublishStatus
    entries = queries.build_query({
        'category': category or '',
        'recurse': recurse,
        '_future': True,
        '_all': all_entries,
    })
    # Slugifier that yields unique, filesystem-safe names.
    fname_slugify = slugify.UniqueSlugify(max_length=100, safe_chars='-.', separator=' ')
    for entry in entries:
        path = os.path.dirname(entry.file_path)
        basename, ext = os.path.splitext(os.path.basename(entry.file_path))
        status = PublishStatus(entry.status)
        eid = entry.id
        if status == PublishStatus.DRAFT:
            # Draft entries don't get a stable entry ID
            eid = status.name
        sid = entry.id if status in (PublishStatus.PUBLISHED,
                                     PublishStatus.HIDDEN,
                                     PublishStatus.SCHEDULED) else status.name
        date = arrow.get(entry.local_date)
        # Expand the user-supplied format into the target basename.
        dest_basename = format_str.format(
            date=date.format('YYYYMMDD'),
            time=date.format('HHmmss'),
            id=eid,
            status=status.name,
            sid=sid,
            title=entry.title,
            slug=entry.slug_text,
            type=entry.entry_type).strip()
        # Collapse runs of spaces left by empty format tokens.
        dest_basename = re.sub(r' +', ' ', dest_basename)
        if dest_basename != basename:
            while True:
                # UniqueSlugify will bump the suffix until it doesn't collide
                dest_path = os.path.join(path, fname_slugify(dest_basename) + ext)
                if not os.path.exists(dest_path):
                    break
            if verbose:
                print(f'{entry.file_path} -> {dest_path}')
            if not os.path.isfile(entry.file_path):
                LOGGER.warning('File %s does not exist; is the index up-to-date?', entry.file_path)
            elif os.path.exists(dest_path):
                LOGGER.warning('File %s already exists', dest_path)
            elif not dry_run:
                try:
                    os.rename(entry.file_path, dest_path)
                except OSError:
                    LOGGER.exception('Error moving %s to %s', entry.file_path, dest_path)
                # Record the new path and persist it immediately.
                entry.file_path = dest_path
                orm.commit()
def setup(app):
    """ Register the CLI commands with the command parser

    :param app: the Flask application to attach the ``publ`` CLI group to
    """
    app.cli.add_command(publ_cli)
| [
"fluffy@beesbuzz.biz"
] | fluffy@beesbuzz.biz |
bf62c010e605a1512decc17c0d3cec6837ac690e | eaf83bd07e03cb3d9934c31c5cda60040546ff3d | /tensorflow/contrib/learn/python/learn/tests/dataframe/test_column.py | ae4f36cceb91e3931f5ae2edbe607de0fed9d156 | [
"Apache-2.0"
] | permissive | anguillanneuf/tensorflow | 508d2ddcd59a0c2ba8e138bdfa0af0fecb5d8640 | 7d58c6856c4e0e3095d4a50cc23a0ff036338949 | refs/heads/master | 2021-01-22T08:32:43.794741 | 2016-06-06T15:10:07 | 2016-06-06T15:10:07 | 59,699,880 | 0 | 0 | null | 2016-05-25T21:45:20 | 2016-05-25T21:45:20 | null | UTF-8 | Python | false | false | 2,240 | py | """Tests of the Column class."""
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
class TransformedColumnTest(tf.test.TestCase):
  """Test of `TransformedColumn`."""
  def test_repr(self):
    # repr should show the transform, its sorted params, inputs, and key.
    col = learn.TransformedColumn(
        [mocks.MockColumn("foobar", [])],
        mocks.MockTwoOutputTransform("thb", "nth", "snt"), "qux")
    # note params are sorted by name
    expected = ("MockTransform({'param_one': 'thb', 'param_three': 'snt', "
                "'param_two': 'nth'})"
                "(foobar)[qux]")
    self.assertEqual(expected, repr(col))
  def test_build_no_output(self):
    # Constructing with output_name=None must raise ValueError.
    def create_no_output_column():
      return learn.TransformedColumn(
          [mocks.MockColumn("foobar", [])],
          mocks.MockZeroOutputTransform("thb", "nth"), None)
    self.assertRaises(ValueError, create_no_output_column)
  def test_build_single_output(self):
    col = learn.TransformedColumn(
        [mocks.MockColumn("foobar", [])],
        mocks.MockOneOutputTransform("thb", "nth"), "out1")
    result = col.build()
    expected = "Fake Tensor 1"
    self.assertEqual(expected, result)
  def test_build_multiple_output(self):
    # Selecting "out2" from a two-output transform yields the second tensor.
    col = learn.TransformedColumn(
        [mocks.MockColumn("foobar", [])],
        mocks.MockTwoOutputTransform("thb", "nth", "snt"), "out2")
    result = col.build()
    expected = "Fake Tensor 2"
    self.assertEqual(expected, result)
if __name__ == "__main__":
tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
62ebcc91889677c8b30ffd65ed29c5829b2a6e1f | b4f211423f51c7c3bfbc39c868aaa15c5899b1fa | /226. Invert Binary Tree.py | 3d93f299ed5775a88c748b37f1e0eae10e2d34b6 | [] | no_license | yaoyu2001/LeetCode_Practice_Python | 705971d17fb91cba9114854886ee113206d23f59 | a09bd0105c0ac9e76e9b4ef1946faa2fb8797660 | refs/heads/master | 2020-11-30T08:57:22.022268 | 2020-09-19T00:01:23 | 2020-09-19T00:01:23 | 230,361,026 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Mirror the tree rooted at *root* in place and return its root."""
        if root is None:
            return None
        # Swap the recursively inverted subtrees.
        root.left, root.right = (self.invertTree(root.right),
                                 self.invertTree(root.left))
        return root
"yongchangyao2001@gmail.com"
] | yongchangyao2001@gmail.com |
9d8ab899ac69be877244f609a08e6ff88f26dcf1 | 79e1a5ad019b261034bc6338e894679d3f5d54d9 | /Regular Expression Matching.py | 7bef362e86cab1c1dbffc2ee974d0dc8261d8b1e | [
"MIT"
] | permissive | ngdeva99/Fulcrum | c615f457ec34c563199cc1dab243ecc62e23ad0b | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | refs/heads/master | 2022-12-15T19:35:46.508701 | 2020-09-09T06:47:48 | 2020-09-09T06:48:08 | 294,027,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | class Solution:
def isMatch(self, s: str, p: str) -> bool:
dp = [[False for _ in range(len(p)+1)] for _ in range(len(s)+1)]
dp[0][0] = True
#noT ALWAYS COLMNS ARE FALSE BECAUSE SOMETIMES THEY CAN match with the empty string for cases like a*b*
patterns with "" string.
#Deals with patterns like a* or a*b* or a*b*c*
for j in range(1,len(dp[0])):
if p[j-1] == "*":
dp[0][j] = dp[0][j-2]
for i in range(1,len(dp)):
for j in range(1,len(dp[0])):
if s[i-1]==p[j-1] or p[j-1]==".":
dp[i][j] = dp[i-1][j-1]
elif p[j-1]=="*":
dp[i][j] = dp[i][j-2]
if s[i-1]==p[j-2] or p[j-2]==".":
dp[i][j] |= dp[i-1][j]
else:
dp[i][j] = False
print(dp)
return dp[-1][-1]
| [
"31466229+ngdeva99@users.noreply.github.com"
] | 31466229+ngdeva99@users.noreply.github.com |
8fb708dfdd6f0ed3f00e5449bc4839c03c543bdc | da0a7446122a44887fa2c4f391e9630ae033daa2 | /python/ray/data/preprocessors/custom_stateful.py | 801f4d6f092df4bfcd672f0d8835e4814150bb1a | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | whiledoing/ray | d8d9ba09b7545e8fd00cca5cfad451278e61fffd | 9272bcbbcae1630c5bb2db08a8279f0401ce6f92 | refs/heads/master | 2023-03-06T16:23:18.006757 | 2022-07-22T02:06:47 | 2022-07-22T02:06:47 | 252,420,044 | 0 | 0 | Apache-2.0 | 2023-03-04T08:57:20 | 2020-04-02T10:07:23 | Python | UTF-8 | Python | false | false | 3,665 | py | from typing import Callable, TYPE_CHECKING, Dict
from ray.data.preprocessor import Preprocessor
from ray.data import Dataset
if TYPE_CHECKING:
import pandas
class CustomStatefulPreprocessor(Preprocessor):
    """A user-defined stateful preprocessor that fits on a :class:`ray.data.Dataset`.

    Escape hatch for arbitrary stateful preprocessing that no built-in
    preprocessor covers.  The caller supplies two callbacks:

    * ``fit_fn`` computes state information from a whole
      :class:`ray.data.Dataset` and returns it as a :class:`dict`
      (stored on ``self.stats_``).
    * ``transform_fn`` receives a :class:`pandas.DataFrame` batch together
      with the fitted state dict and returns the transformed batch.

    Example:
        .. code-block:: python

            from ray.data.aggregate import Max

            def get_max_a(ds):
                # e.g. {'max(A)': 3}
                return ds.aggregate(Max("A"))

            def shift_by_max_a(df, stats):
                max_a = stats["max(A)"]
                df["A"] = df["A"] - max_a
                df["B"] = df["B"] + max_a
                return df

            preprocessor = CustomStatefulPreprocessor(get_max_a, shift_by_max_a)
            preprocessor.fit(ds)
            transformed_ds = preprocessor.transform(ds)

    Args:
        fit_fn: A user defined function that computes state information about
            a :class:`ray.data.Dataset` and returns it in a :class:`dict`.
        transform_fn: A user defined function that takes in a
            :class:`pandas.DataFrame` and the :class:`dict` computed from
            ``fit_fn``, and returns a transformed :class:`pandas.DataFrame`.
    """

    # Tells the Preprocessor base class that fit() must run before transform().
    _is_fittable = True

    def __init__(
        self,
        fit_fn: Callable[[Dataset], Dict],
        transform_fn: Callable[["pandas.DataFrame", Dict], "pandas.DataFrame"],
    ):
        self.fit_fn = fit_fn
        self.transform_fn = transform_fn

    def _fit(self, dataset: Dataset) -> "Preprocessor":
        # Delegate state computation to the user callback; fitted state is
        # stored on `stats_` per the Preprocessor convention.
        self.stats_ = self.fit_fn(dataset)
        return self

    def _transform_pandas(self, df: "pandas.DataFrame") -> "pandas.DataFrame":
        return self.transform_fn(df, self.stats_)

    def __repr__(self):
        def describe(fn):
            # Prefer the callable's name; fall back to its str() for
            # callables without __name__.
            return getattr(fn, "__name__", str(fn))

        return (
            f"CustomStatefulPreprocessor("
            f"fit_fn={describe(self.fit_fn)}, "
            f"transform_fn={describe(self.transform_fn)}, "
            f"stats={getattr(self, 'stats_', None)})"
        )
| [
"noreply@github.com"
] | whiledoing.noreply@github.com |
fe78a5baff324bb3d049a9f7ad54a9d47eca3a4f | ead3ef1aa0d7633f9ba7cf8f4454bac7ca74f540 | /sources/RaMaK/RaMaK.py | 381cb566858cbed3ef3839cd8c1da73b4151cebc | [] | no_license | sffej/Sefaria-Data | e5c64ba436098de4294d441b0d686ad9850c9d07 | 73a0ae6ab80cdac477fd446ac69edf42b6e37919 | refs/heads/master | 2021-01-13T02:53:57.395433 | 2016-12-21T09:17:29 | 2016-12-21T09:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,286 | py | # -*- coding: utf-8 -*-
__author__ = 'stevenkaplan'
from XML_to_JaggedArray import XML_to_JaggedArray
import sys
sys.path.append('../')
from functions import *
sys.path.append('../../../')
from sefaria.model import *
'''Every node whose first element is a title is the node's title. Then remove these titles possibly.
Every other title has structural significance if it has a bold tag as a child
Titles can structure text
Footnotes
Also consider how to decipher JA_array or allowed_tags automatically
'''
def parse(text_arr):
    """Normalise XML-style formatting tags to HTML tags, in place.

    Rewrites <bold>/<italic> (and their closers) to <b>/<i> in every string
    of `text_arr`, mutating the list and returning it.
    """
    assert type(text_arr) is list
    replacements = (
        ("<bold>", "<b>"),
        ("</bold>", "</b>"),
        ("<italic>", "<i>"),
        ("</italic>", "</i>"),
    )
    for idx in range(len(text_arr)):
        updated = text_arr[idx]
        for old, new in replacements:
            updated = updated.replace(old, new)
        text_arr[idx] = updated
    return text_arr
def create_schema():
    """Build and post the Sefaria index schema for "Or Neerav".

    Structure: an Introduction, Parts I-VI (each holding a "Subject"
    paragraph list plus a default Chapter/Paragraph body), Part VII,
    an Appendix shaped like the parts, and a Footnotes section that
    mirrors the top-level layout.

    NOTE(review): relies on the module-global `title` (assigned in the
    __main__ block below) and on `post_index`/`numToHeb` imported via
    `from functions import *` — confirm those helpers' contracts there.
    """
    book = SchemaNode()
    book.key = "ramak"
    book.add_title(u"אור נערב", "he", primary=True)
    book.add_title("Or Neerav", "en", primary=True)
    # Introduction: a flat list of paragraphs (depth 1).
    intro = JaggedArrayNode()
    intro.add_title("Introduction", "en", primary=True)
    intro.add_title(u"הקדמה", "he", primary=True)
    intro.depth = 1
    intro.sectionNames = ["Paragraph"]
    intro.addressTypes = ["Integer"]
    intro.key = "intro"
    book.append(intro)
    # Parts I-VI share one shape: a "Subject" paragraph list plus a
    # default Chapter/Paragraph text node.
    arr = ["I", "II", "III", "IV", "V", "VI"]
    for i in range(6):
        pt = SchemaNode()
        pt.key = "pt"+str(i)+"schema"
        pt.add_title("PART " + arr[i], "en", primary=True)
        pt.add_title(u"חלק "+numToHeb(1+i), "he", primary=True)
        subject = JaggedArrayNode()
        subject.add_title("Subject", "en", primary=True)
        subject.add_title(u"נושא", "he", primary=True)
        subject.key = "subject"
        subject.depth = 1
        subject.sectionNames = ["Paragraph"]
        subject.addressTypes = ["Integer"]
        default = JaggedArrayNode()
        default.depth = 2
        default.default = True
        default.sectionNames = ["Chapter", "Paragraph"]
        default.addressTypes = ["Integer", "Integer"]
        default.key = "default"
        pt.append(subject)
        pt.append(default)
        book.append(pt)
    # Part VII has no chapters: a single depth-1 paragraph list.
    pt7 = JaggedArrayNode()
    pt7.add_title("PART VII", "en", primary=True)
    pt7.add_title(u"חלק ז", "he", primary=True)
    pt7.depth = 1
    pt7.sectionNames = ["Paragraph"]
    pt7.addressTypes = ["Integer"]
    pt7.key = "pt7"
    book.append(pt7)
    # Appendix: same Subject + default layout as Parts I-VI.
    appendix = SchemaNode()
    appendix.add_title("Appendix The Introductory Material", "en", primary=True)
    appendix.add_title(u"נספח: הקדמות", "he", primary=True)
    appendix.key = "appendix"
    subject = JaggedArrayNode()
    subject.add_title("Subject", "en", primary=True)
    subject.add_title(u"נושא", "he", primary=True)
    subject.key = "subject"
    subject.depth = 1
    subject.sectionNames = ["Paragraph"]
    subject.addressTypes = ["Integer"]
    default = JaggedArrayNode()
    default.depth = 2
    default.default = True
    default.sectionNames = ["Chapter", "Paragraph"]
    default.addressTypes = ["Integer", "Integer"]
    default.key = "default"
    appendix.append(subject)
    appendix.append(default)
    # One footnote node per top-level section.  Introduction and Part VII
    # are depth-1 (no chapters); every other section is Chapter/Paragraph.
    footnotes_array = ["Introduction", "PART I", "PART II", "PART III", "PART IV", "PART V", "PART VI", "PART VII", "Appendix The Introductory Material"]
    footnotes_heb = [u"הקדמה", u"חלק א", u"חלק ב", u"חלק ג", u"חלק ד", u"חלק ה", u"חלק ו", u"חלק ז", u"נספח"]
    footnotes = SchemaNode()
    footnotes.key = "footnotes"
    footnotes.add_title("Footnotes", "en", primary=True)
    footnotes.add_title(u"הערות", "he", primary=True)
    for i in range(len(footnotes_array)):
        node = JaggedArrayNode()
        if footnotes_array[i] == "Introduction" or footnotes_array[i] == "PART VII":
            node.depth = 1
            node.sectionNames = ["Paragraph"]
            node.addressTypes = ["Integer"]
        else:
            node.depth = 2
            node.sectionNames = ["Chapter", "Paragraph"]
            node.addressTypes = ["Integer", "Integer"]
        node.key = footnotes_array[i]
        node.add_title(footnotes_array[i], "en", primary=True)
        node.add_title(footnotes_heb[i], "he", primary=True)
        footnotes.append(node)
    book.append(appendix)
    book.append(footnotes)
    book.validate()  # raises if the assembled schema is malformed
    index = {
        "title": title,
        "categories": ["Kabbalah"],
        "schema": book.serialize()
    }
    post_index(index)
if __name__ == "__main__":
    # Version metadata posted alongside the text.
    # NOTE(review): "hi" looks like a placeholder versionTitle/versionSource.
    post_info = {}
    post_info["versionTitle"] = "hi"
    post_info["versionSource"] = "hi"
    post_info["language"] = "en"
    # XML tags/attributes the converter is allowed to process.
    allowed_tags = ["book", "intro", "part", "appendix", "chapter", "p", "ftnote", "title"]
    structural_tags = ["title"] #this is not all tags with structural significance, but just
    #the ones we must explicitly mention, because it has no children,
    #we want what comes after it until the next instance of it to be its children anyway
    allowed_attributes = ["id"]
    file_name = "../sources/DC labs/Robinson_MosesCordoveroIntroductionToKabbalah.xml"
    title = "Or Neerav"  # read as a module global by create_schema()
    # Construct the converter, post the index schema, then upload the text.
    ramak = XML_to_JaggedArray(title, file_name, allowed_tags, allowed_attributes, post_info, parse)
    create_schema()
    ramak.run()
| [
"skaplan@brandeis.edu"
] | skaplan@brandeis.edu |
acf150c4647da83352303ffcee565e243f7cd2c4 | fe22e8ffdb1b2f1e11becc027e71a7a512fe56eb | /misc/analysis_step2/calculateBTaggingEffs.py | e0d52d9a1afe7d8fabf519c6d6d0b3cb4089ff58 | [] | no_license | HEP-KBFI/stpol | 3cdb5dc125bb0394f4531abfdfe9629b0c8d0fa4 | 962837a3341dd26391025b9a07a9c1c93084bf64 | refs/heads/master | 2020-06-03T16:15:14.743807 | 2015-08-05T09:00:28 | 2015-08-05T09:00:28 | 5,716,481 | 0 | 1 | null | 2015-03-04T08:23:28 | 2012-09-07T12:27:30 | Python | UTF-8 | Python | false | false | 7,138 | py | from anfw import *
import pdb
import math
import json
#cut = Cuts.mu + Cuts.MT + Cuts.mlnu + Cuts.jetRMS + Cuts.jetPt + Cuts.jets_1LJ + Cuts.etaLJ + Cuts.recoFState #Cut("1plusLJ", "_lightJetCount>=1")
cut = Cuts.mu + Cuts.MT + Cuts.mlnu + Cuts.jetRMS + Cuts.etaLJ + Cuts.recoFState + Cuts.jetPt + Cut("1plusLJ", "_lightJetCount>=1")
#cut = Cuts.mu + Cuts.MT
print cut
def effUnc(eff, count):
    """Binomial (statistical) uncertainty of an efficiency.

    For an efficiency `eff` measured from `count` trials this is
    sqrt(eff * (1 - eff) / count).
    """
    variance = eff * (1.0 - eff) / count
    return math.sqrt(variance)
of = ROOT.TFile("bTaggingEffs.root", "RECREATE")
def calcBTaggingEff(channel):
    """Measure per-flavour b-tagging efficiencies for one MC channel.

    Loops over the events passing the module-level `cut`, counts true and
    b-tagged jets per parton flavour (b/c/light), fills b-discriminator
    histograms into the module-level output file `of`, prints a summary
    (Python 2 print statements), and returns a dict of efficiencies (in
    percent), their binomial uncertainties, and the raw jet counts.

    NOTE(review): depends on the module globals `of`, `cut` and `channels`
    (the latter from `from anfw import *`) — confirm their definitions.
    """
    print "B-tagging effs for channel {0}".format(channel)
    of.cd()
    # Per-flavour b-discriminator distributions, written into `of`.
    hTrueB_bDiscr = ROOT.TH1F("hTrueB_BDiscr_{0}".format(channel), "true b-jet b-discriminator distribution", 1000, -100, 40)
    hTrueC_bDiscr = ROOT.TH1F("hTrueC_BDiscr_{0}".format(channel), "true c-jet b-discriminator distribution", 1000, -100, 40)
    hTrueL_bDiscr = ROOT.TH1F("hTrueL_BDiscr_{0}".format(channel), "true l-jet b-discriminator distribution", 1000, -100, 40)
    ROOT.gROOT.cd()
    #cut = Cuts.finalMu
    # Select the events passing the cut into a ROOT event list.
    channels[channel].tree.Draw(">>elist", cut.cutStr)
    elist = ROOT.gROOT.Get("elist")
    print "Number of events in selection: %d" % elist.GetN()
    lepCount = {-1:0, 0: 0, 1:0, 2:0, 3:0}
    sumBTaggedB = 0
    sumTrueB = 0
    sumBTaggedC = 0
    sumTrueC = 0
    sumBTaggedL = 0
    sumTrueL = 0
    nFailed = 0
    tree = channels[channel].tree
    for i in range(elist.GetN()):
        tree.GetEntry(elist.GetEntry(i))
        # -1 is a sentinel for missing truth information; skip such events.
        if (tree._btaggedTrueBJetCount == -1 or tree._trueBJetCount == -1 or
            tree._btaggedTrueCJetCount == -1 or tree._trueCJetCount == -1 or
            tree._btaggedTrueLJetCount == -1 or tree._trueLJetCount == -1
        ):
            nFailed += 1
            #print "Warning: anomalous event"
            continue
        nJets = tree._lightJetCount + tree._bJetCount
        # Fill discriminator histograms for (at most) the two leading jets.
        # NOTE: this inner `i` shadows the outer event index; harmless here
        # because the outer `for` reassigns `i` on the next iteration.
        for i in range(min(2, nJets)):
            partonFlavour = getattr(tree, "_goodJets_{0}_partonFlavour".format(i))
            bDiscriminator = getattr(tree, "_goodJets_{0}_bDiscriminator".format(i))
            if abs(partonFlavour)==5:
                hTrueB_bDiscr.Fill(bDiscriminator)
            elif abs(partonFlavour)==4:
                hTrueC_bDiscr.Fill(bDiscriminator)
            else:
                hTrueL_bDiscr.Fill(bDiscriminator)
        lepCount[tree._genLeptonsTCount] += 1
        sumBTaggedB += tree._btaggedTrueBJetCount
        sumTrueB += tree._trueBJetCount
        sumBTaggedC += tree._btaggedTrueCJetCount
        sumTrueC += tree._trueCJetCount
        sumBTaggedL += tree._btaggedTrueLJetCount
        sumTrueL += tree._trueLJetCount
    print ("jet counts (tagged | all): B: %d | %d" % (sumBTaggedB, sumTrueB)) + ("; C: %d | %d" % (sumBTaggedC, sumTrueC)) + ("; L: %d | %d" % (sumBTaggedL, sumTrueL))
    #print "Generated lepton counts: {0}".format(str(lepCount))
    # Efficiency = tagged / true, per flavour (raises ZeroDivisionError if a
    # flavour has no true jets in the selection).
    eff_b = float(sumBTaggedB)/float(sumTrueB)
    eff_c = float(sumBTaggedC)/float(sumTrueC)
    eff_l = float(sumBTaggedL)/float(sumTrueL)
    sigma_eff_b = effUnc(eff_b, sumTrueB)
    sigma_eff_c = effUnc(eff_c, sumTrueC)
    sigma_eff_l = effUnc(eff_l, sumTrueL)
    print "nFailed = {0}".format(nFailed)
    def printEff(eff, sigma, flavour):
        print "eff_{3} = {0:.2E} (\sigma {1:.2E}) ({2:.1%})".format(eff, sigma, sigma/eff, flavour)
    printEff(eff_b, sigma_eff_b, "b")
    printEff(eff_c, sigma_eff_c, "c")
    printEff(eff_l, sigma_eff_l, "l")
    print 80*"-"
    of.Write()
    return {
        "count_events": elist.GetN(),
        "eff_b": 100.0*eff_b, "eff_c": 100.0*eff_c, "eff_l": 100.0*eff_l,
        "sigma_eff_b": 100*sigma_eff_b, "sigma_eff_c": 100*sigma_eff_c, "sigma_eff_l": 100*sigma_eff_l,
        "rel_sigma_eff_b": 100.0*sigma_eff_b/eff_b, "rel_sigma_eff_c": 100.0*sigma_eff_c/eff_c, "rel_sigma_eff_l": 100.0*sigma_eff_l/eff_l,
        "count_b_total": sumTrueB, "count_b_tagged": sumBTaggedB,
        "count_c_total": sumTrueC, "count_c_tagged": sumBTaggedC,
        "count_l_total": sumTrueL, "count_l_tagged": sumBTaggedL
    }
effs = dict()
effs["T_t"] = calcBTaggingEff("T_t")
effs["WJets"] = calcBTaggingEff("WJets")
effs["TTbar"] = calcBTaggingEff("TTbar")
out = dict()
out["bTaggingEffs"] = dict()
for (chan, eff) in effs.items():
for (k, v) in eff.items():
out["bTaggingEffs"]["{0}_{1}".format(k, chan)] = v
of.Close()
ofile = open("bTaggingEffs.json", "w+")
ofile.write(json.dumps(effs))
ofile.close()
from Cheetah.Template import Template
temp = r"""
#compiler-settings
cheetahVarStartToken = @
#end compiler-settings
\begin{tabular}{ |l|c|c|c|c|c| }
\hline
MC sample & MC events in sel. & flavour & total & b-tagged & $\epsilon$ & stat. unc. \\
\hline
\multirow{3}{*}{single top, t-channel, top} & \multirow{3}{*}{@effs['T_t']['count_events']} & b & @effs['T_t']['count_b_total'] & @effs['T_t']['count_b_tagged'] & #echo '%.2f' % @effs['T_t']['eff_b']# \pm #echo '%.1f' % @effs['T_t']['rel_sigma_eff_b']#\% \\\cline{3-6}
% & & c & @effs['T_t']['count_c_total'] & @effs['T_t']['count_c_tagged'] & #echo '%.3E' % @effs['T_t']['eff_c']# & #echo '%.3E' % @effs['T_t']['sigma_eff_c']# \\\cline{3-7}
% & & l & @effs['T_t']['count_l_total'] & @effs['T_t']['count_l_tagged'] & #echo '%.3E' % @effs['T_t']['eff_l']# & #echo '%.3E' % @effs['T_t']['sigma_eff_l']# \\\cline{3-7}
% \hline
% \multirow{3}{*}{$t\bar{t}$} & \multirow{3}{*}{@effs['TTbar']['count_events']} & b & @effs['TTbar']['count_b_total'] & @effs['TTbar']['count_b_tagged'] & #echo '%.3E' % @effs['TTbar']['eff_b']# & #echo '%.3E' % @effs['TTbar']['sigma_eff_b']# \\\cline{3-7}
% & & c & @effs['TTbar']['count_c_total'] & @effs['TTbar']['count_c_tagged'] & #echo '%.3E' % @effs['TTbar']['eff_c']# & #echo '%.3E' % @effs['TTbar']['sigma_eff_c']# \\\cline{3-7}
% & & l & @effs['TTbar']['count_l_total'] & @effs['TTbar']['count_l_tagged'] & #echo '%.3E' % @effs['TTbar']['eff_l']# & #echo '%.3E' % @effs['TTbar']['sigma_eff_l']# \\\cline{3-7}
% \hline
% \multirow{3}{*}{W+Jets} & \multirow{3}{*}{@effs['WJets']['count_events']} & b & @effs['WJets']['count_b_total'] & @effs['WJets']['count_b_tagged'] & #echo '%.3E' % @effs['WJets']['eff_b']# & #echo '%.3E' % @effs['WJets']['sigma_eff_b']# \\\cline{3-7}
% & & c & @effs['WJets']['count_c_total'] & @effs['WJets']['count_c_tagged'] & #echo '%.3E' % @effs['WJets']['eff_c']# & #echo '%.3E' % @effs['WJets']['sigma_eff_c']# \\\cline{3-7}
% & & l & @effs['WJets']['count_l_total'] & @effs['WJets']['count_l_tagged'] & #echo '%.3E' % @effs['WJets']['eff_l']# & #echo '%.3E' % @effs['WJets']['sigma_eff_l']# \\\cline{3-7}
\hline
\end{tabular}
"""
print Template(temp, searchList=[{"effs": effs}]) | [
"joosep.pata@gmail.com"
] | joosep.pata@gmail.com |
4dcc4ff446eff2fef093f48a7aea4875ff8d2d3a | 6c543074f1d764af9701e5b55db9ab0220c1df93 | /03_循环/venv/bin/pip3.6 | 6dfe12f619a30cc702d37070c1f52c36606b8e5c | [] | no_license | allenlgy/Django-project | 127e984e13f71d20e01df68ad42d00b977ac0105 | 9c4b9e6c67481a5f3cef58ea47e9fd62058036d8 | refs/heads/master | 2020-06-23T01:03:03.170674 | 2019-09-04T06:11:40 | 2019-09-04T06:11:40 | 198,453,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | 6 | #!/home/linguiyi/Desktop/03_循环/venv/bin/python -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.6'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.6')()
)
| [
"1006547624@qq.com"
] | 1006547624@qq.com |
1f5a6da61f4297f4c0f993fcff3e63528df697d2 | 61f2172dfbb81aa3ad46b2063ad6baced0c94b5c | /juicer/models/inline_response2007.py | 6353cdbb447feea555aa18c46832da56cfb855a6 | [] | no_license | rochacbruno-archive/pulp3-juicer | 0005517a8b1e840b2c08ca492318f5e4b460edf3 | da9b636720281169d343a6190d6615a81f631b0b | refs/heads/master | 2020-04-29T01:24:42.736203 | 2019-03-15T02:09:43 | 2019-03-15T02:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | # coding: utf-8
"""
Pulp3 API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from juicer.models.update_record import UpdateRecord # noqa: F401,E501
class InlineResponse2007(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'int',
'next': 'str',
'previous': 'str',
'results': 'list[UpdateRecord]'
}
attribute_map = {
'count': 'count',
'next': 'next',
'previous': 'previous',
'results': 'results'
}
def __init__(self, count=None, next=None, previous=None, results=None): # noqa: E501
"""InlineResponse2007 - a model defined in Swagger""" # noqa: E501
self._count = None
self._next = None
self._previous = None
self._results = None
self.discriminator = None
self.count = count
if next is not None:
self.next = next
if previous is not None:
self.previous = previous
self.results = results
@property
def count(self):
"""Gets the count of this InlineResponse2007. # noqa: E501
:return: The count of this InlineResponse2007. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this InlineResponse2007.
:param count: The count of this InlineResponse2007. # noqa: E501
:type: int
"""
if count is None:
raise ValueError("Invalid value for `count`, must not be `None`") # noqa: E501
self._count = count
@property
def next(self):
"""Gets the next of this InlineResponse2007. # noqa: E501
:return: The next of this InlineResponse2007. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this InlineResponse2007.
:param next: The next of this InlineResponse2007. # noqa: E501
:type: str
"""
self._next = next
@property
def previous(self):
"""Gets the previous of this InlineResponse2007. # noqa: E501
:return: The previous of this InlineResponse2007. # noqa: E501
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this InlineResponse2007.
:param previous: The previous of this InlineResponse2007. # noqa: E501
:type: str
"""
self._previous = previous
@property
def results(self):
"""Gets the results of this InlineResponse2007. # noqa: E501
:return: The results of this InlineResponse2007. # noqa: E501
:rtype: list[UpdateRecord]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this InlineResponse2007.
:param results: The results of this InlineResponse2007. # noqa: E501
:type: list[UpdateRecord]
"""
if results is None:
raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2007):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"rochacbruno@gmail.com"
] | rochacbruno@gmail.com |
7082139b388aa7fe762248acdc6c4af2c6679758 | baf3996414315ffb60470c40c7ad797bf4e6897f | /10_back_dev/paradigms/functional/py_scripts/21. Handling errors in lambda expressions.py | 36cd909a7c4d14bb020ce8a13c5dc78044b7d955 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 788 | py |
# coding: utf-8
# ## Let's reconsider our lambda
# In[3]:
l_add_str = lambda s: sum([int(i) for i in s.split('+')])
# ## A Maybe-like decorator
#
# The Maybe monad is not very Pythonic. But we can do something similar using a decorator.
# In[5]:
def maybe(fnc):
    """Maybe-monad-like decorator for error propagation.

    The wrapped callable short-circuits when any argument is already an
    Exception (returning that argument unchanged), and converts exceptions
    raised by `fnc` into return values instead of letting them propagate.
    """
    def wrapper(*args):
        for arg in args:
            if isinstance(arg, Exception):
                return arg
        try:
            return fnc(*args)
        except Exception as exc:
            return exc
    return wrapper
safe_add_str = maybe(lambda s: sum([int(i) for i in s.split('+')]))
print(safe_add_str(1+2))
# ## Exceptions are fine!
#
# Even though `Exception`s are not entirely compatible with a functional programming style, they are still a very good way to deal with errors!
| [
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
3ae428c5c3d9934484c31bd9f055b975f15db166 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/ChRapO/A.py | 74c4807722da1da40ad33a1b289805c6a93fd531 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | import sys
cases = sys.stdin.readline()
# Vowels; any other character counts as a consonant below.
non = ['a','e','i','o','u']
for case in range(0,int(cases)):
    # Each case: a word `s` and an integer L; count substrings of `s` that
    # contain at least L consecutive consonants.
    s,L = [v for v in sys.stdin.readline().split()]
    L = int(L)
    res = 0
    ind = 0
    sum = 0  # NOTE: shadows builtin `sum`; length of the current consonant run
    K = []   # end indices of every run reaching L consecutive consonants
    for c in s:
        if not c in non: sum += 1
        else: sum = 0
        if sum >= L:
            K.append(ind)
        ind += 1
    next = 0  # NOTE: shadows builtin `next`; monotone pointer into K
    # For each left endpoint i, find the first consonant-window ending at or
    # after i+L-1; every right endpoint from there on gives a valid substring.
    for i in range(0, len(s) - L + 1):
        while next<len(K)-1 and i+L-1>K[next]: next+=1
        if next<len(K) and K[next]-i>=L - 1:
            res += len(s) - K[next]
    print "Case #%d: %d" % (case+1, res)
"eewestman@gmail.com"
] | eewestman@gmail.com |
7665c3d92f0286971a166f11effa1d65de7fb0bc | 2dad8b725583afd64e2f381acb6a299350a069c4 | /winback/wsgi.py | 26e399e353bb46b9c222b8c1e08797b7183f46fc | [] | no_license | s4-hub/winback | 39b0b354690201a7906ce77f46c1172ddcb21110 | abfb22b6ed5d523b93ea5cdb982ac3066a63ab7c | refs/heads/master | 2020-12-22T12:27:54.416189 | 2020-02-11T10:50:30 | 2020-02-11T10:50:30 | 233,515,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for winback project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'winback.settings')
application = get_wsgi_application()  # WSGI callable used by the server
| [
"syafii.newbie@gmail.com"
] | syafii.newbie@gmail.com |
7b43a51da0f5244e8620e566618e73db3071883d | 78cc1e7a9703769cfce430bbc4ac38c874a59d47 | /backend/home/migrations/0003_message.py | 6c1a3eebe5afc3461fa8aa5be519fb3834b1d7e7 | [] | no_license | crowdbotics-apps/thefuture-21697 | 9790196ed22bb7a859ea6d8aefa1a916e998208d | e0552b0cec2b826933e7d01a73c8b434195e4f61 | refs/heads/master | 2022-12-29T18:28:04.481853 | 2020-10-19T05:35:21 | 2020-10-19T05:35:21 | 305,272,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # Generated by Django 2.2.16 on 2020-10-19 05:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dating', '0001_initial'),
('home', '0002_load_initial_data'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('created', models.DateTimeField(auto_now_add=True)),
('inbox', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_inbox', to='dating.Inbox')),
('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_match', to='dating.Match')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
19bac62d51ee3986732db67f4d6bab2ebfcd2f2a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s866107546.py | 76d4bb9f3ddc109f45e5d41132929ecf15b57adf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | import sys,queue,math,copy,itertools,bisect,collections,heapq
def main():
LI = lambda : [int(x) for x in sys.stdin.readline().split()]
H,W,N = LI()
d = collections.Counter()
for _ in range(N):
x,y = LI()
for dx in range(-1,2):
for dy in range(-1,2):
nx = x+dx
ny = y+dy
if 1 < nx < H and 1 < ny < W:
d[(nx,ny)] += 1
ans = [0] * 10
for x,c in collections.Counter(d.values()).items():
ans[x] += c
ans[0] = (H-2) * (W-2) - sum(ans[1:])
print(*ans,sep='\n')
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
629b3d6a0feeebefff9e7835994f4fea2c8d79c7 | 065956c29148d6b9a52cc97f1766f2d3e9e3ad83 | /pandas/venv/Scripts/f2py.py | b74390c16600d0f3554f89e7ca0bcab017816327 | [] | no_license | HBU/PythonLearning | 17b09ad32ea0100b18f01ad489b3daa81f70594a | 18303752c44ed90beefb13725690124031381f35 | refs/heads/master | 2022-11-24T05:17:01.968082 | 2019-06-06T05:01:08 | 2019-06-06T05:01:08 | 123,361,590 | 3 | 5 | null | 2022-11-18T22:18:49 | 2018-03-01T00:53:39 | Python | UTF-8 | Python | false | false | 797 | py | #!C:\GitHub\PythonLearning\pandas\venv\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
# Pick the backend from an optional --<mode> CLI flag and consume it from
# sys.argv so f2py never sees it.  If no flag matches, the loop finishes
# without break and `mode` is left at the last entry, "2e-numpy".
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    # NOTE(review): this generated wrapper writes a literal backslash-n
    # ("\\n"), not a newline; preserved as generated.
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
main()
| [
"8584751@qq.com"
] | 8584751@qq.com |
e953bb5cba9a5f137ea0bb01359006d3dca25399 | 4090d8b4e8e9e28d620d222651c73a12a753be36 | /cases/urls.py | b90ee81d849bdb23377c093189d6fb2c10d48c89 | [] | no_license | isaev4lex/220studio | 91aa08f9d10ff55e98effe2542e26799efb6e2f2 | 6188403eeed7ee590b21da15c67af9e6f06ab06b | refs/heads/main | 2023-08-20T07:14:18.203593 | 2021-10-31T07:24:19 | 2021-10-31T07:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from django.urls import path
from . import views
# Both routes are served by views.cases; the slug variant presumably
# filters by category — confirm against the view's signature.
urlpatterns = [
    path('', views.cases, name='cases'),
    path('<slug:category_slug>/', views.cases, name='cases_slug'),
]
| [
"FWorld21@protonmail.com"
] | FWorld21@protonmail.com |
612ca64cff2aaf1672b0799554822c9afa445102 | 6740ee4590bd30513c7e5fe185db42ca5c8fd565 | /lib/html_highlight.py | 2b27e1d9c4e0b63c3e6cf56d72e9ba1a4b6265aa | [
"MIT"
] | permissive | emmetio/sublime-text-plugin | 5a10c12e2d8c8a15b0be3b29c44e913bbfba4526 | df81e99d9655fe3ad0d7187051369a5324c6f139 | refs/heads/master | 2023-08-01T12:38:02.914935 | 2023-01-18T23:58:25 | 2023-01-18T23:58:25 | 205,387,294 | 267 | 38 | MIT | 2021-06-14T06:11:38 | 2019-08-30T13:25:18 | Python | UTF-8 | Python | false | false | 1,605 | py | import re
import html
from ..emmet.html_matcher import scan, get_attributes, ElementType
re_tag_end = re.compile(r'\s*\/?>$')
def highlight(code: str) -> str:
    """Return *code* with its HTML tags wrapped in syntax-highlighting <span>s.

    Uses Emmet's HTML scanner to locate tags; text between tags is passed
    through escape() verbatim.  Tag names, attribute names and attribute
    values each get their own span class (styled by styles() below).

    Improvement: the original smuggled the running offset through a
    one-element list (`offset = [0]`) to mutate it from the callback; a
    plain int with `nonlocal` is the idiomatic Python 3 form.
    """
    chunks = []
    offset = 0  # end of the last slice of `code` already emitted

    def cb(name: str, elem_type: int, start: int, end: int):
        nonlocal offset
        # Emit any plain text sitting between the previous tag and this one.
        if offset != start:
            chunks.append(escape(code[offset:start]))
        offset = end
        if elem_type == ElementType.Close:
            chunks.append('<span class="tag close"></<span class="tag-name">%s</span>></span>' % name)
        else:
            chunks.append('<span class="tag open"><<span class="tag-name">%s</span>' % name)
            for attr in get_attributes(code, start, end, name):
                chunks.append(' <span class="attr">')
                chunks.append('<span class="attr-name">%s</span>' % attr.name)
                if attr.value is not None:
                    chunks.append('=<span class="attr-value">%s</span>' % attr.value)
                chunks.append('</span>')
            # Reproduce the tag's own closer ("/>" or ">") outside the spans.
            tag_end = re_tag_end.search(code[start:end])
            if tag_end:
                chunks.append(escape(tag_end.group(0)))
            chunks.append('</span>')

    scan(code, cb)
    chunks.append(escape(code[offset:]))
    return ''.join(chunks)
def styles():
    """CSS for the span classes emitted by highlight(), in dark/light themes."""
    css = """
    .dark .tag { color: #77c7b4; }
    .dark .attr-name { color: #8fd260; }
    .dark .attr-value { color: #ff6e61; }
    .light .tag { color: #0046aa; }
    .light .attr-name { color: #017ab7; }
    .light .attr-value { color: #017ab7; }
    """
    return css
return html.escape(code, False)
| [
"serge.che@gmail.com"
] | serge.che@gmail.com |
68066c1b7cc8196473a609c02be7e95140327953 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/3346b78a8a872286a245d1e77ef4718fc5e6be1a-<has_zoneinfo_database>-bug.py | 64bd17668d26971a8bc27e797781cbcb4d0f7c4d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | @cached_property
def has_zoneinfo_database(self):
with self.connection.cursor() as cursor:
cursor.execute('SELECT 1 FROM mysql.time_zone LIMIT 1')
return (cursor.fetchone() is not None) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
afa3e2142f3568deab7b6eba89c0bd6107a71827 | c7c0aff3a367e21aeb8470c74607f882266a4bdc | /pystagram/settings.py | 982ac51073b3e076d2c92020039bea5bc90684a7 | [] | no_license | hanquf1/s5-pystagram | 30c58a97ad909120ad5dcb72c7517106aff66fe0 | a893e3a4aa8c6d7f0bde4a734f5ae9e602678692 | refs/heads/master | 2021-01-21T20:06:46.221481 | 2016-03-12T08:16:17 | 2016-03-12T08:16:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | """
Django settings for pystagram project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f!fz*)t_l!xeio^fg@&w*gjb$&3-@pqx!d@-hp#g0c*qt^)y*g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photos',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pystagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pystagram.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/staticfile_url/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'my_static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'collected_statics')
MEDIA_URL = '/uploads/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload_files')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s',
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'access.log',
'formatter': 'verbose',
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['file', 'console'],
'level': 'INFO',
},
},
}
| [
"kay@hannal.net"
] | kay@hannal.net |
941346ac5ae82a0bd2402d7543bd22bcb7f01648 | bd435e3ff491d13c3cb1ffcf34771ac1c80f7859 | /code/base/identity_operator.py | 9f341d5aa53d3e532e9a7e70ff8a938e3f91f132 | [] | no_license | luningcowboy/PythonTutorial | 8f4b6d16e0fad99a226540a6f12639ccdff402ff | 9024efe8ed22aca0a1271a2c1c388d3ffe1e6690 | refs/heads/master | 2021-06-16T23:03:22.153473 | 2020-04-09T13:52:12 | 2020-04-09T13:52:12 | 187,571,993 | 0 | 0 | null | 2021-03-25T23:02:36 | 2019-05-20T05:16:13 | Python | UTF-8 | Python | false | false | 440 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
a = 20
b = 20
if (a is b):
print 'a, b 有相同的标识'
else:
print 'a, b 有不同的标识'
if (a is not b):
print 'a, b 有不同的标识'
else:
print 'a, b 有相同的标识'
b = 30
if (a is b):
print 'a, b 有相同的标识'
else:
print 'a, b 有不同的标识'
if (a is not b):
print 'a, b 有不同的标识'
else:
print 'a, b 有相同的标识'
| [
"luningcowboy@gmail.com"
] | luningcowboy@gmail.com |
ec96395680b9454b0b1a4e15719186c299b15355 | 8c4e20343b8e901981f592ec420356dd5c7d3079 | /mapproxy/test/helper.py | f5a0d4b4bf35d159e0f2df7121873abfa94f876c | [
"BSD-3-Clause",
"Python-2.0",
"Bitstream-Vera",
"MIT",
"ZPL-2.1",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | jukkatolonen/mapproxy | 94337ee44b7b47d11d717bcb9d36969d8cb03ce1 | e4ad1972858dfbb72d8686c5b59751b4b781fc35 | refs/heads/master | 2021-02-10T04:00:13.079893 | 2020-03-05T15:18:16 | 2020-03-05T15:18:16 | 244,350,181 | 0 | 1 | NOASSERTION | 2020-03-03T13:48:35 | 2020-03-02T11:07:43 | null | UTF-8 | Python | false | false | 7,623 | py | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tempfile
import os
import re
import sys
from glob import glob as globfunc
from contextlib import contextmanager
from lxml import etree
from mapproxy.test import mocker
from mapproxy.compat import string_type, PY2
class Mocker(object):
"""
This is a base class for unit-tests that use ``mocker``. This class follows
the xUnit naming conventions for setup and teardown methods.
`setup` will initialize a `mocker.Mocker`. The `teardown` method
will run ``mocker.verify()``.
"""
def setup(self):
self.mocker = mocker.Mocker()
def expect_and_return(self, mock_call, return_val):
"""
Register a return value for the mock call.
:param return_val: The value mock_call should return.
"""
self.mocker.result(return_val)
def expect(self, mock_call):
return mocker.expect(mock_call)
def replay(self):
"""
Finish mock-record phase.
"""
self.mocker.replay()
def mock(self, base_cls=None):
"""
Return a new mock object.
:param base_cls: check method signatures of the mock-calls with this
base_cls signature (optional)
"""
if base_cls:
return self.mocker.mock(base_cls)
return self.mocker.mock()
def teardown(self):
self.mocker.verify()
class TempFiles(object):
"""
This class is a context manager for temporary files.
>>> with TempFiles(n=2, suffix='.png') as tmp:
... for f in tmp:
... assert os.path.exists(f)
>>> for f in tmp:
... assert not os.path.exists(f)
"""
def __init__(self, n=1, suffix='', no_create=False):
self.n = n
self.suffix = suffix
self.no_create = no_create
self.tmp_files = []
def __enter__(self):
for _ in range(self.n):
fd, tmp_file = tempfile.mkstemp(suffix=self.suffix)
os.close(fd)
self.tmp_files.append(tmp_file)
if self.no_create:
os.remove(tmp_file)
return self.tmp_files
def __exit__(self, exc_type, exc_val, exc_tb):
for tmp_file in self.tmp_files:
if os.path.exists(tmp_file):
os.remove(tmp_file)
self.tmp_files = []
class TempFile(TempFiles):
def __init__(self, suffix='', no_create=False):
TempFiles.__init__(self, suffix=suffix, no_create=no_create)
def __enter__(self):
return TempFiles.__enter__(self)[0]
class LogMock(object):
log_methods = ('info', 'debug', 'warn', 'error', 'fail')
def __init__(self, module, log_name='log'):
self.module = module
self.orig_logger = None
self.logged_msgs = []
def __enter__(self):
self.orig_logger = self.module.log
self.module.log = self
return self
def __getattr__(self, name):
if name in self.log_methods:
def _log(msg):
self.logged_msgs.append((name, msg))
return _log
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, name))
def assert_log(self, type, msg):
log_type, log_msg = self.logged_msgs.pop(0)
assert log_type == type, 'expected %s log message, but was %s' % (type, log_type)
assert msg in log_msg.lower(), "expected string '%s' in log message '%s'" % \
(msg, log_msg)
def __exit__(self, exc_type, exc_val, exc_tb):
self.module.log = self.orig_logger
def assert_re(value, regex):
"""
>>> assert_re('hello', 'l+')
>>> assert_re('hello', 'l{3}')
Traceback (most recent call last):
...
AssertionError: hello ~= l{3}
"""
match = re.search(regex, value)
assert match is not None, '%s ~= %s' % (value, regex)
def assert_files_in_dir(dir, expected, glob=None):
"""
assert that (only) ``expected`` files are in ``dir``.
``filter`` can be a globbing patter, other files are ignored if it is set.
"""
if glob is not None:
files = globfunc(os.path.join(dir, glob))
files = [os.path.basename(f) for f in files]
else:
files = os.listdir(dir)
files.sort()
assert sorted(expected) == files
def validate_with_dtd(doc, dtd_name, dtd_basedir=None):
if dtd_basedir is None:
dtd_basedir = os.path.join(os.path.dirname(__file__), 'schemas')
dtd_filename = os.path.join(dtd_basedir, dtd_name)
with open(dtd_filename, 'rb') as schema:
dtd = etree.DTD(schema)
if isinstance(doc, (string_type, bytes)):
xml = etree.XML(doc)
else:
xml = doc
is_valid = dtd.validate(xml)
print(dtd.error_log.filter_from_errors())
return is_valid
def validate_with_xsd(doc, xsd_name, xsd_basedir=None):
if xsd_basedir is None:
xsd_basedir = os.path.join(os.path.dirname(__file__), 'schemas')
xsd_filename = os.path.join(xsd_basedir, xsd_name)
with open(xsd_filename, 'rb') as schema:
xsd = etree.parse(schema)
xml_schema = etree.XMLSchema(xsd)
if isinstance(doc, (string_type, bytes)):
xml = etree.XML(doc)
else:
xml = doc
is_valid = xml_schema.validate(xml)
print(xml_schema.error_log.filter_from_errors())
return is_valid
class XPathValidator(object):
def __init__(self, doc):
self.xml = etree.XML(doc)
def assert_xpath(self, xpath, expected=None):
assert len(self.xml.xpath(xpath)) > 0, xpath + ' does not match anything'
if expected is not None:
if callable(expected):
assert expected(self.xml.xpath(xpath)[0])
else:
assert self.xml.xpath(xpath)[0] == expected
def xpath(self, xpath):
return self.xml.xpath(xpath)
def strip_whitespace(data):
"""
>>> strip_whitespace(' <foo> bar\\n zing\\t1')
'<foo>barzing1'
"""
if isinstance(data, bytes):
return re.sub(b'\s+', b'', data)
else:
return re.sub('\s+', '', data)
@contextmanager
def capture(bytes=False):
if PY2:
from StringIO import StringIO
else:
if bytes:
from io import BytesIO as StringIO
else:
from io import StringIO
backup_stdout = sys.stdout
backup_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
yield sys.stdout, sys.stderr
except Exception as ex:
backup_stdout.write(str(ex))
if bytes:
backup_stdout.write(sys.stdout.getvalue().decode('utf-8'))
backup_stderr.write(sys.stderr.getvalue().decode('utf-8'))
else:
backup_stdout.write(sys.stdout.getvalue())
backup_stderr.write(sys.stderr.getvalue())
raise
finally:
sys.stdout = backup_stdout
sys.stderr = backup_stderr
| [
"olt@bogosoft.com"
] | olt@bogosoft.com |
8fbe0b915a4895d240f0b8f9a530e338488a7fde | be7cdd0c8e55a8fec0d1b226c2ea1664f28636a5 | /steps/step39.py | 1f3824de4816a17de355a3d6b1cfa5e9647f3311 | [
"MIT"
] | permissive | peaceiris/deep-learning-from-scratch-3 | 8274ba7735dc36a86305bfe3db683423aff2095e | 6f05c642cb6d1ee1f25f5b3ed538f1d1d19a4a08 | refs/heads/master | 2021-03-25T02:19:09.161809 | 2020-03-13T02:16:18 | 2020-03-13T02:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | import os, sys;
if '__file__' in globals():
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable
import dezero.functions as F
x = Variable(np.array([1, 2, 3, 4, 5, 6]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.sum(x)
y.backward()
print(y)
print(x.grad)
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.sum(x, axis=0)
y.backward()
print(y)
print(x.grad)
x = Variable(np.random.randn(2, 3, 4, 5))
y = x.sum(keepdims=True)
print(y.shape) | [
"koki0702@gmail.com"
] | koki0702@gmail.com |
e67818904e5a156fb0c9b8bb42a92efd60ef479d | 0421da0c3ba42e4758cb4da4679f3218d4ea733d | /setup_cython.py | 4a9d2d872ad922a9379f7f8eecc98ad916038591 | [] | no_license | vanife/Mandelbrot_pyCUDA_Cython_Numpy | f252a6773791414468859a77e8ed0d6bd1dc2586 | baa164a392078019dffc886981f0107ad3f0fca1 | refs/heads/master | 2021-01-18T01:54:56.383453 | 2010-10-29T16:54:44 | 2010-10-29T16:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
ext = Extension("mandel", ["mandelbrot_cython.pyx"],
include_dirs = [numpy.get_include()])
setup(ext_modules=[ext],
cmdclass = {'build_ext': build_ext})
| [
"ian@ianozsvald.com"
] | ian@ianozsvald.com |
3b34f72942d5a210ed18059182dee423557a6d20 | c855bc4640a54630b62fafd89a20967606a8f14e | /breadth_first_search/200_number_of_islands.py | f01cf9ae63404df6a0a9aa9d380da3c0c64bdc5b | [
"MIT"
] | permissive | asethi-ds/Algorithm-Toolbox | 0d8f7e68ebce47810c7dd88df0bb6d911013e1d1 | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | refs/heads/master | 2021-01-02T15:44:06.833196 | 2019-08-31T11:32:11 | 2019-08-31T11:32:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,485 | py | class DU(object):
def __init__(self, n):
self.count = 0
self.parent = [-1] * n
self.rank = [0] * n
def isLand(self, idx):
return self.parent[idx] != -1
def setLand(self, idx):
self.parent[idx] = idx # initialize parent to itself
self.count += 1
def find(self, x):
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x] # DO NOT RETURN x
def union(self, x, y):
xr, yr = self.find(x), self.find(y)
if xr == yr:
return
if self.rank[xr] < self.rank[yr]:
self.parent[xr] = yr # setting parent of ROOT
elif self.rank[xr] > self.rank[yr]:
self.parent[yr] = xr # setting parent of ROOT
else:
self.parent[xr] = yr # setting parent of ROOT
self.rank[yr] += 1
self.count -= 1
class Solution2:
def numIslands(self, grid: 'List[List[str]]') -> 'int':
# empty array
if not grid or not grid[0]:
return 0
nr, nc = len(grid), len(grid[0])
du = DU(nr * nc)
for r in range(nr):
for c in range(nc):
if grid[r][c] == "1":
du.setLand(r * nc + c)
for dr, dc in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
candr = r + dr
candc = c + dc
if 0 <= candr < nr and 0 <= candc < nc and du.isLand(candr * nc + candc):
du.union(r * nc + c, candr * nc + candc)
return du.count
# clean solution
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
edge case: [], [[]]
"""
# use for loop to implicitly handle edge case
# nrow = len(grid)
# if nrow == 0: return 0
# ncol = len(grid[0])
def bfs(r, c):
queue = [(r, c)]
steps = [(-1, 0), (0, 1), (1, 0), (0, -1)] # left, up, right, down
inBound = lambda r, c: 0 <= r < len(grid) and 0 <= c < len(grid[0])
grid[r][c] = "0" # mark starter node
while queue:
r, c = queue.pop(0) # pop left
for dr, dc in steps:
rr, cc = r + dr, c + dc
if inBound(rr, cc) and grid[rr][cc] == "1":
grid[rr][cc] = "0" # mark before appending, to avoid revisiting
queue.append((rr, cc))
nlands = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == "1":
nlands += 1
bfs(r, c)
return nlands
# verbose solution, no code refactoring
# class Solution(object):
# def numIslands(self, grid):
# """
# :type grid: List[List[str]]
# :rtype: int
# """
# if not grid:
# return 0
# nrows = len(grid)
# ncols = len(grid[0])
#
# n = 0
# for r in range(nrows):
# for c in range(ncols):
# if grid[r][c] == "1":
# n += 1
#
# queue = []
# queue.append((r, c))
# while queue:
# print(queue)
# node = queue.pop(0)
# grid[node[0]][node[1]] = "0"
# if node[0] - 1 >= 0 and grid[node[0] - 1][node[1]] == '1' and (node[0] - 1, node[1]) not in queue:
# queue.append((node[0] - 1, node[1]))
# if node[0] + 1 < nrows and grid[node[0] + 1][node[1]] == '1' and (node[0] + 1, node[1]) not in queue:
# queue.append((node[0] + 1, node[1]))
# if node[1] - 1 >= 0 and grid[node[0]][node[1] - 1] == '1' and (node[0], node[1] - 1) not in queue:
# queue.append((node[0], node[1] - 1))
# if node[1] + 1 < ncols and grid[node[0]][node[1] + 1] == '1' and (node[0], node[1] + 1) not in queue:
# queue.append((node[0], node[1] + 1))
#
# print(n)
# return n
solver = Solution()
solver.numIslands([["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]])
| [
"shawlu@github.com"
] | shawlu@github.com |
cfa87a320b1aed769df2a85d3e917fa91cae9bdb | 6b70e48ddc38f58a142229bcfb5c4dc5553dfd4f | /tests/year2015/test_day05.py | 5cbb7ea5658d39adf0fabe78343051ab4404f93d | [] | no_license | N8Brooks/aoc_py | eaea98db5cdd1c795edae48a46f765e0628b53e8 | fab391ac33aa3c9623e8f5d2b9af44693c04bd00 | refs/heads/master | 2023-02-22T20:07:16.265316 | 2021-01-27T02:50:47 | 2021-01-27T02:50:47 | 326,284,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
https://adventofcode.com/2015/day/5
"""
from unittest import TestCase, main
from aoc.year2015.day05 import part1, part2
from data.utils import get_input
class TestPart1(TestCase):
def test_input(self):
self.assertEqual(part1(get_input(2015, 5)), 238)
def test_example_1(self):
self.assertEqual(part1("ugknbfddgicrmopn"), 1)
def test_example_2(self):
self.assertEqual(part1("aaa"), 1)
def test_example_3(self):
self.assertEqual(part1("jchzalrnumimnmhp"), 0)
def test_example_4(self):
self.assertEqual(part1("haegwjzuvuyypxyu"), 0)
def test_example_5(self):
self.assertEqual(part1("dvszwmarrgswjxmb"), 0)
class TestPart2(TestCase):
def test_input(self):
self.assertEqual(part2(get_input(2015, 5)), 69)
def test_example_1(self):
self.assertEqual(part2("qjhvhtzxzqqjkmpb"), 1)
def test_example_2(self):
self.assertEqual(part2("xxyxx"), 1)
def test_example_3(self):
self.assertEqual(part2("uurcxstgmygtbstg"), 0)
def test_example_4(self):
self.assertEqual(part2("ieodomkazucvgmuy"), 0)
if __name__ == "__main__": # pragma: no cover
main()
| [
"natebrooks004@gmail.com"
] | natebrooks004@gmail.com |
99017c4177c3a784d610e345e4fbeb33dcbb03b1 | 78980891d3137810bf3a3c1bb229966b7f49f0dd | /leetcode_projects/leetcode_79/main.py | 81c9df7e276c5deb6365ca9f25aa4e317a87f92d | [] | no_license | miniyk2012/leetcode | 204927d3aefc9746070c1bf13abde517c6c16dc0 | 91ca9cd0df3c88fc7ef3c829dacd4d13f6b71ab1 | refs/heads/master | 2021-06-17T21:50:31.001111 | 2021-03-10T11:36:23 | 2021-03-10T11:36:23 | 185,042,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | from typing import List
class Solution:
def exist(self, board: List[List[str]], word: str) -> bool:
visited = [[False for _ in range(len(board[0]))] for _ in range(len(board))]
for i in range(len(board)):
for j in range(len(board[i])):
if self.dfs_visit(board, i, j, visited, word):
return True
return False
def dfs_visit(self, board, i, j, visited, word):
if word == '' or board[i][j] == word:
return True
if board[i][j] != word[0]:
return False
visited[i][j] = True
if i - 1 >= 0 and not visited[i - 1][j]:
if self.dfs_visit(board, i - 1, j, visited, word[1:]):
return True
if i + 1 < len(board) and not visited[i + 1][j]:
if self.dfs_visit(board, i + 1, j, visited, word[1:]):
return True
if j - 1 >= 0 and not visited[i][j - 1]:
if self.dfs_visit(board, i, j - 1, visited, word[1:]):
return True
if j + 1 < len(board[0]) and not visited[i][j + 1]:
if self.dfs_visit(board, i, j + 1, visited, word[1:]):
return True
visited[i][j] = False
return False
if __name__ == '__main__':
s = Solution()
board = [
['A', 'B', 'C', 'E'],
['S', 'F', 'C', 'S'],
['A', 'D', 'E', 'E']
]
word = "ABCCED"
print(s.exist(board, word))
word = "SEE"
print(s.exist(board, word))
word = "ABCB"
print(s.exist(board, word))
print(s.exist([["a"]], "a"))
| [
"yk_ecust_2007@163.com"
] | yk_ecust_2007@163.com |
93e98da2888ffdc5e81a842198548cdc41a1f8b7 | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/8555125/snippet.py | 2e295f5380a400a7b3a6254a9c0e1ce26a8c10c3 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 799 | py | #!/usr/bin/env python
from gmusicapi import Musicmanager
from gmusicapi import Mobileclient
import sys
import os.path
params = sys.argv
if len(params) < 2:
print "usage:" + sys.argv[0] + " filename [playlist name]"
sys.exit()
file = params[1]
if len(params) == 3:
plname = params[2]
else:
plname = None
mm = Musicmanager()
api = Mobileclient()
mm.login()
api.login('GoogleID', 'Password')
track = mm.upload(file)
track_id = track[0][file]
if plname:
playlist_id = None
playlists = api.get_all_playlists()
for playlist in playlists:
if plname == playlist['name']:
playlist_id = playlist['id']
break
if playlist_id == None:
playlist_id = api.create_playlist(plname)
api.add_songs_to_playlist(playlist_id, track_id)
| [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
9712d805806c5ebc08e5a784664f19fa245f635e | 0bc0db1edc610c9f08261c777d06cb1be4b7a524 | /lgp/pythonSpider/ch6_baseSpider/SpiderMan.py | 27fa7cea73087d54c1dc84ca5aae57aee0591090 | [] | no_license | danpianji/python3.7 | 9bc7f9a765ec76d7d4c5fb413dcdada4f9e8f510 | f66bc7139f9441583b1043d3da11597987e3fbc0 | refs/heads/master | 2020-12-28T14:49:41.410708 | 2019-05-19T10:13:32 | 2019-05-19T10:13:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # -*- coding: UTF-8 -*-
import DataOutput
import UrlManager
import HtmlDownloader
import HtmlParser
class SpiderMan:
def __init__(self):
self.manager=UrlManager.UrlManager()
self.downloader=HtmlDownloader.HtmlDownloader()
self.parser=HtmlParser.HtmlParser()
self.output=DataOutput.DataOutput()
def crawl(self, root_url):
self.manager.add_new_url(root_url)
while(self.manager.has_new_url() and self.manager.old_urls_size()<100):
try:
url = self.manager.get_new_url()
html = self.downloader.download(url)
new_urls, new_datas = self.parser.parser(url, html)
self.manager.add_new_urls(new_urls)
self.output.store_data(new_datas)
except Exception as e:
print ("crawl failed")
self.output.output_html()
if __name__=="__main__":
spider_man=SpiderMan()
spider_man.crawl("https://baike.baidu.com/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB")
| [
"liguangpei1@163.com"
] | liguangpei1@163.com |
b477b6935dc4429c088f96e141aecd0facda38c5 | ffd14a5749fae8dbf6f284bf54d026fb9f3a79dd | /leetcode/problem407/problem407.py | 4f2cf2355186fac460a5149068b2c11555ddbf38 | [] | no_license | speciallan/algorithm | 2dc307b4bcc4ac0adda97a24059346028f71f412 | 306927b65320af9f3177d28a6367ea65ea9044d5 | refs/heads/master | 2020-04-15T21:55:39.874010 | 2019-07-22T14:05:38 | 2019-07-22T14:05:38 | 165,052,274 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | def spe(*vars):
for var in vars:
print(var)
exit()
class Solution:
def trapRainWater(self, heightMap):
"""
:type heightMap: List[List[int]]
:rtype: int
"""
h, w = len(heightMap), len(heightMap[0])
i, j = 1, 1
while 1 <= i and i <= h-2:
while 1 <= j and j <= w-2:
self.allAroundAreHigher(heightMap, i, j)
def allAroundAreHigher(self, heightMap, i, j):
around = []
DIRECTION = [(0, 1), (0, -1), (1, 0), (-1, 0)]
# 是否下降
for k in range(len(DIRECTION)):
around_h = heightMap[i+DIRECTION[k][0]][j+DIRECTION[k][0]]
# 比自己大的加入around数组
if around_h > heightMap[i][j]:
around.append(around_h)
# 比自己小的往下搜索,继续寻找边界around
else:
water_line = min(around)
print('水位线:', water_line)
if __name__ == "__main__":
heightMap = [[1,4,3,1,3,2],
[3,2,1,3,2,4],
[2,3,3,2,3,1]]
solution = Solution()
result = solution.trapRainWater(heightMap)
print(result)
| [
"350394776@qq.com"
] | 350394776@qq.com |
55be60bcef1440bdbe2a556d4d6040d48a722fc2 | 9124e66c8ec04e61537473437a92b53daa32ce20 | /linares/app7.py | 96108d4cda6858d3f7e424b32b73af8cc2e1953d | [] | no_license | erick1984linares/t10_linares_rojas | 28618baccb3472fb8d48b34f5d1107b702c399d0 | ba9462b3b881dbd3665907a7a33c4c7d80aa4251 | refs/heads/master | 2020-12-04T06:38:06.929626 | 2020-01-10T11:52:29 | 2020-01-10T11:52:29 | 231,661,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | import libreria
def agregartrabajo():
trabajo=libreria.pedir_trabajo("ingrese trabajo:")
sueldo=libreria.pedir_numero("ingrese sueldo :", 1500, 4000)
contenido=trabajo + "-" + str(sueldo) + "\n"
libreria.guardar_datos("info.txt", contenido, "a")
print("Datos guardados")
def verDatos():
datos = libreria.obtener_datos("info.txt")
if ( datos != ""):
print(datos)
else:
print("Archivo sin datos")
opc=0
max=3
while(opc != max):
print("######## MENU #######")
print("1. Agregar trabajo ")
print("2. ver Datos")
print("3. Salir")
print("#####################")
opc=libreria.pedir_numero("Ingresar Opcion:", 1, 3)
if (opc == 1):
agregartrabajo()
if (opc == 2):
verDatos()
#fin_menu
print("Fin del programa")
| [
"ylinares@unprg.edu.pe"
] | ylinares@unprg.edu.pe |
c46b00db0d82519840cf3e18ea0069b174f42ca1 | 78f3fe4a148c86ce9b80411a3433a49ccfdc02dd | /2017/01/climate-anomaly-20170118/graphic_config.py | 239d3e7a57c82ad7aae3803427073bcdc9ec9e73 | [] | no_license | nprapps/graphics-archive | 54cfc4d4d670aca4d71839d70f23a8bf645c692f | fe92cd061730496cb95c9df8fa624505c3b291f8 | refs/heads/master | 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1Cnn0p5LpuF8chCzjUOVQcIO5NBvBpIoLLeDpbBMLP6M'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| [
"ahurt@npr.org"
] | ahurt@npr.org |
02fd6948ff472f910b32eda2c39cf98b58009ff4 | 8323f95ad0083bfe6da57b777a1af078a454bb04 | /ssm/version.py | 45b058b3d7179dad6109b590f93ac78881bb9e4d | [
"BSD-3-Clause"
] | permissive | sflis/SSM-analysis | 600c867b68845850e7af54d02dc4dd344b9f1427 | 317db8b296fd189832b9344b0429ea6016e35999 | refs/heads/master | 2020-04-19T10:19:28.810811 | 2020-01-23T15:41:36 | 2020-01-23T15:41:36 | 168,136,746 | 0 | 1 | BSD-3-Clause | 2019-09-27T09:00:48 | 2019-01-29T10:33:01 | Python | UTF-8 | Python | false | false | 5,922 | py | """
Get version identification from git.
The update_release_version() function writes the current version to the
VERSION file. This function should be called before packaging a release version.
Use the get_version() function to get the version string, including the latest
commit, from git.
If git is not available the VERSION file will be read.
Heres an example of such a version string:
v0.2.0.post58+git57440dc
This code was taken from here:
https://github.com/cta-observatory/ctapipe/blob/master/ctapipe/version.py
which in turn based it on:
https://github.com/aebrahim/python-git-version
Combining ideas from
http://blogs.nopcode.org/brainstorm/2013/05/20/pragmatic-python-versioning-via-setuptools-and-git-tags/
and Python Versioneer
https://github.com/warner/python-versioneer
but being much more lightwheight
"""
from __future__ import print_function
from subprocess import check_output, CalledProcessError
from os import path, name, devnull, environ, listdir
__all__ = ("get_version",)
CURRENT_DIRECTORY = path.dirname(path.abspath(__file__))
VFILE = "_version_cache.py"
VERSION_FILE = path.join(CURRENT_DIRECTORY, VFILE)
GIT_COMMAND = "git"
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
if name == "nt":
def find_git_on_windows():
"""find the path to the git executable on windows"""
# first see if git is in the path
try:
check_output(["where", "/Q", "git"])
# if this command succeeded, git is in the path
return "git"
# catch the exception thrown if git was not found
except CalledProcessError:
pass
# There are several locations git.exe may be hiding
possible_locations = []
# look in program files for msysgit
if "PROGRAMFILES(X86)" in environ:
possible_locations.append(
"%s/Git/cmd/git.exe" % environ["PROGRAMFILES(X86)"]
)
if "PROGRAMFILES" in environ:
possible_locations.append("%s/Git/cmd/git.exe" % environ["PROGRAMFILES"])
# look for the github version of git
if "LOCALAPPDATA" in environ:
github_dir = "%s/GitHub" % environ["LOCALAPPDATA"]
if path.isdir(github_dir):
for subdir in listdir(github_dir):
if not subdir.startswith("PortableGit"):
continue
possible_locations.append(
"%s/%s/bin/git.exe" % (github_dir, subdir)
)
for possible_location in possible_locations:
if path.isfile(possible_location):
return possible_location
# git was not found
return "git"
GIT_COMMAND = find_git_on_windows()
def get_git_describe_version(abbrev=7):
"""return the string output of git desribe"""
try:
with open(devnull, "w") as fnull:
arguments = [GIT_COMMAND, "describe", "--tags", "--abbrev=%d" % abbrev]
return (
check_output(arguments, cwd=CURRENT_DIRECTORY, stderr=fnull)
.decode("ascii")
.strip()
)
except (OSError, CalledProcessError):
return None
def format_git_describe(git_str, pep440=False):
"""format the result of calling 'git describe' as a python version"""
if "-" not in git_str: # currently at a tag
formatted_str = git_str
else:
# formatted as version-N-githash
# want to convert to version.postN-githash
git_str = git_str.replace("-", ".post", 1)
if pep440: # does not allow git hash afterwards
formatted_str = git_str.split("-")[0]
else:
formatted_str = git_str.replace("-g", "+git")
# need to remove the "v" to have a proper python version
if formatted_str.startswith("v"):
formatted_str = formatted_str[1:]
return formatted_str
def read_release_version():
"""Read version information from VERSION file"""
try:
from ._version_cache import version
if len(version) == 0:
version = None
return version
except ImportError:
return "unknown"
def update_release_version(fpath, pep440=False):
"""Release versions are stored in a file called VERSION.
This method updates the version stored in the file.
This function should be called when creating new releases.
It is called by setup.py when building a package.
pep440: bool
When True, this function returns a version string suitable for
a release as defined by PEP 440. When False, the githash (if
available) will be appended to the version string.
"""
version = get_version(pep440=pep440)
with open(path.join(fpath, VFILE), "w") as outfile:
outfile.write("version={}".format(version))
outfile.write("\n")
def get_version(pep440=False):
    """Return the current version of this package.

    pep440: bool
        When True, return a version string suitable for a release as
        defined by PEP 440.  When False, the githash (if available) is
        appended to the version string.

    When run inside an active git repository, the version is derived
    from ``git describe``.  Otherwise it is reasonable to assume the
    version is not being incremented, so the cached release version is
    returned instead.
    """
    described = get_git_describe_version()
    if described:
        return format_git_describe(described, pep440=pep440)
    # Not a git repository (or git unavailable): fall back to the cache.
    return read_release_version()
if __name__ == "__main__":
print(get_version())
| [
"samuel.d.flis@gmail.com"
] | samuel.d.flis@gmail.com |
ee8830814c25c7b4ace1b95ca6d4dcb8eb422d37 | 43e303f0a00f7854b9405bb2a2a9ecbad18ae0fa | /venv/lib/python3.7/site-packages/py2app/recipes/sip.py | 0ab3cb8cbfafc9cbed42e2d549ba3adca0b3d981 | [
"MIT"
] | permissive | ykhade/Advent_Of_Code_2019 | f64005c6e8872c17468f00eac2b247b6fa77c7f5 | 375ab43104712c5e1c782e5ea5f04073b5f8916c | refs/heads/master | 2023-02-26T03:43:47.668384 | 2022-06-21T03:31:22 | 2022-06-21T03:31:22 | 224,943,590 | 1 | 1 | MIT | 2023-02-08T00:45:15 | 2019-11-30T01:27:48 | Python | UTF-8 | Python | false | false | 4,408 | py | """
Py2app support for projects using sip, which basically means PyQt and wrappers
for other Qt-based libraries.
This will include all C modules that might be used when you import a package
using sip because we have no way to fine-tune this.
The problem with SIP is that all inter-module dependencies (for example from
PyQt4.Qt to PyQt4.QtCore) are handled in C code and therefore cannot be
detected by the python code in py2app.
"""
import sys
import glob
import os
import pkg_resources
class Sip(object):
    """py2app recipe for sip-based bindings (PyQt4 and wrapped Qt libs).

    sip wires inter-module dependencies in C, so they cannot be detected
    from Python source; this recipe therefore includes every package it
    finds next to the PyQt4 package, wholesale.
    """

    def __init__(self):
        # Cached set of sip package names; computed lazily by config().
        self.packages = None
        # Qt plugins directory; filled in by config().
        self.plugin_dir = None

    def config(self):
        """Locate the PyQt4/Qt installation and collect sip package names.

        Returns the cached set of package names.  Raises ImportError when
        no usable Qt installation is present, which check() treats as
        "recipe does not apply".
        """
        if self.packages is not None:
            # Already computed on an earlier call; reuse the cache.
            return self.packages
        import sipconfig, os
        try:
            set
        except NameError:
            # Ancient Python without the builtin set type.
            from sets import Set as set
        ##old version for PyQt/Qt 3
        # cfg = sipconfig.Configuration()
        # qtdir = cfg.qt_lib_dir
        ##new version for PyQt 4
        from PyQt4 import pyqtconfig
        cfg = pyqtconfig.Configuration()
        qtdir = cfg.qt_lib_dir
        if not os.path.exists(qtdir):
            # half-broken installation? ignore.
            raise ImportError
        # Qt is GHETTO!
        # Make sure the Qt library dir is on DYLD_LIBRARY_PATH so the
        # dynamic linker can resolve the Qt frameworks at build time.
        dyld_library_path = os.environ.get('DYLD_LIBRARY_PATH', '').split(':')
        if qtdir not in dyld_library_path:
            dyld_library_path.insert(0, qtdir)
            os.environ['DYLD_LIBRARY_PATH'] = ':'.join(dyld_library_path)
        sipdir = os.path.dirname(cfg.pyqt_mod_dir)
        self.packages = set()
        self.plugin_dir = os.path.join(cfg.qt_dir, 'plugins')
        # Every directory sitting next to the PyQt4 package is assumed to
        # be a sip-based package that must be included.
        for fn in os.listdir(sipdir):
            fullpath = os.path.join(sipdir, fn)
            if os.path.isdir(fullpath):
                self.packages.add(fn)
                if fn == 'PyQt4':
                    # PyQt4 has a nested structure, also import
                    # subpackages to ensure everything gets seen.
                    for sub in os.listdir(fullpath):
                        if ".py" not in sub:
                            self.packages.add('%s.%s'%(fn, sub.replace(".so","")))
        # Causes a python3-related syntax error (metaclass keyword),
        # and you probably don't need it:
        #if "PyQt4.uic" in self.packages and sys.version_info.major != 3:
        #    print("WARNING: PyQt uic module found.")
        #    print("avoid python3 metaclass syntax errors by adding 'PyQt4.uic' to your excludes option.")
        return self.packages

    def check(self, cmd, mf):
        """Add sip packages and Qt plugin resources to module graph *mf*.

        cmd: the py2app command object (provides qt_plugins).
        mf:  the modulegraph instance being populated.
        Returns a dict (possibly with a 'resources' entry), or None when
        no sip package is actually used by the application.
        """
        try:
            packages = self.config()
        except ImportError:
            # No usable PyQt4 installation; recipe contributes nothing.
            return dict()
        if 'PyQt4.uic' in packages:
            # PyQt4.uic contains subpackages with python 2 and python 3
            # support. Exclude the variant that won't be used; this avoids
            # compilation errors on Python 2 (because some of the Python 3
            # code is not valid Python 2 code)
            if sys.version_info[0] == 2:
                ref = 'PyQt4.uic.port_v3'
            else:
                ref = 'PyQt4.uic.port_v2'
            # Exclude...
            mf.lazynodes[ref] = None
        # Only proceed if at least one sip package is really in the graph.
        for pkg in packages:
            m = mf.findNode(pkg)
            if m is not None and m.filename is not None:
                break
        else:
            return None
        mf.import_hook('sip', m)
        m = mf.findNode('sip')
        # naive inclusion of ALL sip packages
        # stupid C modules.. hate hate hate
        for pkg in packages:
            try:
                mf.import_hook(pkg, m)
            except ImportError as exc:
                print("WARNING: ImportError in sip recipe ignored: %s"%(exc,))
        if mf.findNode('PyQt4') is not None:
            # Bundle qt.conf plus the requested Qt plugin files so the
            # frozen app can locate its plugins at runtime.
            resources = [pkg_resources.resource_filename('py2app', 'recipes/qt.conf')]
            for item in cmd.qt_plugins:
                if '/' not in item:
                    # A bare plugin-category name means "everything in it".
                    item = item + '/*'
                if '*' in item:
                    for path in glob.glob(os.path.join(self.plugin_dir, item)):
                        resources.append((os.path.dirname('qt_plugins' + path[len(self.plugin_dir):]), [path]))
                else:
                    resources.append((os.path.dirname(os.path.join('qt_plugins', item)), os.path.join(self.plugin_dir, item)))
            return dict(resources=resources)
        return dict()
# Module-level entry point; presumably looked up by name by py2app's
# recipe loader — expose the bound method of a shared Sip instance.
check = Sip().check
| [
"ykhade@nevada.unr.edu"
] | ykhade@nevada.unr.edu |
09691989e4f1280519fda2aeb11c1288c5554678 | 4d89652acca24e0bc653e0b4cb5846ceb5b568e4 | /google-cloud-sdk/lib/surface/labelmanager/keys/create.py | 84f9e6cb8034b58c702180d29b613d537fa4faec | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | ibssasimon/LyricLingo | 410fcec94d2bd3ea75c975c55713f5b8fb913229 | 0dfc951b270912470b36ce0083afd9d4fe41b10a | refs/heads/master | 2021-06-25T10:00:18.215900 | 2020-01-09T00:35:46 | 2020-01-09T00:35:46 | 222,135,399 | 2 | 1 | null | 2021-04-30T20:54:14 | 2019-11-16T17:32:19 | Python | UTF-8 | Python | false | false | 2,418 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create command for the Label Manager - Label Keys CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.labelmanager import service as labelmanager
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.labelmanager import arguments
from googlecloudsdk.command_lib.labelmanager import operations
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Create(base.Command):
  r"""Creates a label key resource under the specified label parent.

  ## EXAMPLES

  To create a label key with the name env under an organization run:

        $ gcloud alpha labelmanager keys create env \
            --label_parent='organizations/123' --description='description'
  """

  @staticmethod
  def Args(parser):
    # Register flags; calliope introspects this method to build the CLI.
    group = parser.add_argument_group('LabelKey.', required=True)
    arguments.AddLabelParentArgToParser(group, required=True)
    arguments.AddDisplayNameArgToParser(group)
    arguments.AddDescriptionArgToParser(parser)
    arguments.AddAsyncArgToParser(parser)

  def Run(self, args):
    """Issues the LabelKey create request.

    Returns the long-running operation immediately when --async is set;
    otherwise blocks until the operation completes and returns it.
    """
    labelkeys_service = labelmanager.LabelKeysService()
    labelmanager_messages = labelmanager.LabelManagerMessages()

    display_name = args.DISPLAY_NAME
    label_parent = args.label_parent
    description = args.description

    create_request = labelmanager_messages.LabelKey(
        displayName=display_name, parent=label_parent, description=description)
    op = labelkeys_service.Create(create_request)

    if args.async_:
      # Caller asked not to wait; hand back the in-progress operation.
      return op
    else:
      # Poll until the operation finishes, then return the completed op.
      done_op = operations.WaitForOperation(
          op,
          'Waiting for label [{}] to be created with [{}]'.format(
              display_name, op.name),
          service=labelkeys_service)
      return done_op
| [
"ibssasimon@gmail.com"
] | ibssasimon@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.