repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
appcues/data-uploader
|
test/cli_test.rb
|
<filename>test/cli_test.rb<gh_stars>0
require 'test/unit'
require 'json'
require 'pp'
CMD = "ruby bin/appcues-data-uploader"
# Integration tests for the appcues-data-uploader CLI. Each test shells
# out to the executable with -d (dry-run) and -q (quiet) so profile
# updates are printed as JSON lines instead of being sent anywhere,
# then parses and verifies that output against test/test.csv.
class CliTest < Test::Unit::TestCase
  def test_stdin
    # Input on stdin works both implicitly and via the explicit "-" arg.
    verify_test_output(`#{CMD} -a 999 -d -q < test/test.csv`)
    verify_test_output(`#{CMD} -a 999 -d -q - < test/test.csv`)
  end

  def test_filename
    verify_test_output(`#{CMD} -a 999 -d -q test/test.csv`)
  end

  def test_filename_twice
    # The same file given twice is processed twice, in order.
    output = `#{CMD} -a 999 -d -q test/test.csv test/test.csv`
    updates = output.split("\n").map { |line| JSON.parse(line) }
    verify_test_updates(updates[0..2])
    verify_test_updates(updates[3..5])
  end

  def test_filename_and_stdin
    # "-" (stdin) and file-name arguments may be mixed.
    output = `#{CMD} -a 999 -d -q - test/test.csv < test/test.csv`
    updates = output.split("\n").map { |line| JSON.parse(line) }
    verify_test_updates(updates[0..2])
    verify_test_updates(updates[3..5])
  end

  # Parses one JSON update per output line and checks the whole batch.
  def verify_test_output(output)
    verify_test_updates(output.split("\n").map { |line| JSON.parse(line) })
  end

  # Asserts that +updates+ are exactly the three rows of test/test.csv.
  # assert_equal instead of `assert a == b` so a failure prints a diff
  # of expected vs. actual instead of an opaque "false is not true".
  def verify_test_updates(updates)
    assert_equal({
      "account_id" => "999",
      "user_id" => "123",
      "profile_update" => {
        "numeric" => -0.01,
        "boolean" => true,
        "string" => "decks",
      },
      "events" => [],
    }, updates[0])
    assert_equal({
      "account_id" => "999",
      "user_id" => "Asdf",
      "profile_update" => {
        "numeric" => 22,
        "boolean" => false,
        "string" => "omg",
      },
      "events" => [],
    }, updates[1])
    assert_equal({
      "account_id" => "999",
      "user_id" => "WAT",
      "profile_update" => {
        "numeric" => -2,
        "boolean" => false,
        "string" => "wat",
      },
      "events" => [],
    }, updates[2])
  end
end
|
ameuret/m2config
|
spec/setting_spec.rb
|
# -*- coding: utf-8 -*-
require "env"

# Specs for M2Config::Setting, the key/value `setting` table wrapper.
# CFG (a shared M2Config::Config) comes from spec/env.rb, which also
# wraps each example in a rolled-back transaction.
describe M2Config::Setting do
  describe "::new" do
    it "stores the given value under the given key" do
      M2Config::Setting.new("answer",42)
      res = CFG.db[:setting].first
      # Values are coerced to strings before hitting the DB.
      expect(res[:value]).to eq("42")
    end
    it "updates the existing value if the given key is already present" do
      M2Config::Setting.new("answer",41)
      M2Config::Setting.new("answer",42)
      res = CFG.db[:setting].first
      expect(res[:value]).to eq("42")
    end
  end
end
|
ameuret/m2config
|
spec/server_spec.rb
|
require "env"

# Specs for M2Config::Server (CFG and per-example transaction rollback
# are provided by spec/env.rb).
describe M2Config::Server do
  before(:each) do
    @srv = M2Config::Server.new
    # Host used by the #add_host example below.
    @host = M2Config::Host.new({matching:"example.com", name: "ex", server_id:(rand 42)})
  end
  describe '::new' do
    it 'creates a server entry with reasonable default settings' do
      # Every column should have been filled with the class defaults.
      res = CFG.db[:server].first
      expect(res[:access_log]).to eq(M2Config::Server::ACCESS_LOG)
      expect(res[:error_log]).to eq(M2Config::Server::ERROR_LOG)
      expect(res[:pid_file]).to eq(M2Config::Server::PID_FILE)
      expect(res[:control_port]).to eq(M2Config::Server::CONTROL_PORT)
      expect(res[:chroot]).to eq(M2Config::Server::CHROOT)
      expect(res[:default_host]).to eq(M2Config::Server::DEFAULT_HOST)
      expect(res[:name]).to eq(M2Config::Server::NAME)
      expect(res[:bind_addr]).to eq(M2Config::Server::BIND_ADDR)
      expect(res[:port]).to eq(M2Config::Server::PORT)
      expect(res[:use_ssl]).to eq(M2Config::Server::USE_SSL)
    end
  end
  describe '::first (from Sequel::Model)' do
    it 'returns the first server found in the database' do
      srv = M2Config::Server.first
      expect(srv.id).to eq(@srv.id)
    end
    it 'raises if there is more than one server' do
      # Second server in the same DB makes Server.first ambiguous.
      M2Config::Server.new
      expect { M2Config::Server.first }.to raise_exception /Careful ! You are calling Server.first on a database holding multiple servers/
    end
  end
  describe '#add_host (assigns the given host to the server)' do
    it 'accepts an existing Host instance' do
      @srv.add_host @host
      res = CFG.db[:host].where(id: @host.id).first
      expect(res[:server_id]).to eq(@srv.id)
    end
  end
end
|
ameuret/m2config
|
lib/m2config/dir.rb
|
module M2Config
  # Wraps Mongrel2's `directory` table: a static-content handler rooted
  # at a chroot-relative base path.
  class Dir < Sequel::Model(:directory)
    plugin :validation_helpers

    # Creates and immediately persists a directory row.
    #
    # fields - row values; :base is required and must be a relative path
    #          ending in "/". :default_ctype and :index_file get sane
    #          defaults when absent.
    #
    # Raises ArgumentError when :base starts with, or does not end with,
    # a slash.
    def initialize( fields )
      fields[:default_ctype] ||= "application/octet-stream"
      fields[:index_file] ||= "index.html"
      # \A / \z anchor the whole string; the original ^ / $ anchors match
      # per-line and would mis-validate multi-line input.
      raise ArgumentError, "Base directories are relative to chroot and must not start with a slash (as in your <#{fields[:base]}>)" if
        fields[:base] =~ /\A\/.+/
      raise ArgumentError, "Base directories must end with a slash (your <#{fields[:base]}> does not)" if
        !(fields[:base] =~ /\/\z/)
      super(fields, false)
      save
    end

    # Polymorphic tag stored in Route#target_type.
    def type
      "dir"
    end
  end
end
|
ameuret/m2config
|
lib/m2config/handler.rb
|
<filename>lib/m2config/handler.rb
module M2Config
  # Wraps Mongrel2's `handler` table: a ZeroMQ application endpoint
  # described by its send/receive socket specs and identities.
  class Handler < Sequel::Model(:handler)
    plugin :validation_helpers

    # Creates and immediately persists a handler row. The send and
    # receive endpoints must differ; recv_ident defaults to "".
    def initialize( fields )
      if fields[:send_spec] == fields[:recv_spec]
        raise ArgumentError, "The send and receive endpoints can not be the same"
      end
      fields[:recv_ident] ||= ""
      super(fields, false)
      save
    end

    # Polymorphic tag stored in Route#target_type.
    def type
      "handler"
    end
  end
end
|
ameuret/m2config
|
lib/m2config/route.rb
|
<filename>lib/m2config/route.rb
module M2Config
  # Wraps Mongrel2's `route` table. A route binds a URL path on a Host
  # to a target (Dir, Proxy or Handler), stored polymorphically as the
  # (target_id, target_type) column pair.
  class Route < Sequel::Model(:route)
    many_to_one :host

    # Creates and immediately persists a route. fields[:target] must be
    # an object responding to #id and #type (Dir, Proxy or Handler); it
    # is flattened into target_id/target_type before the row insert.
    def initialize( fields )
      fields[:target_id] = fields[:target].id
      fields[:target_type] = fields[:target].type
      fields.delete(:target)
      super(fields, false)
      save
    end

    # Makes +route66+ the only route matching its path on its host by
    # deleting every other route with the same (path, host_id).
    def self.elect!( route66 )
      toMatch = {path: route66.path, host_id: route66.host_id}
      toExclude = {id: route66.id}
      Route.where(toMatch).exclude(toExclude).delete
    end

    # Assigns the owning host; accepts a Host instance or a bare id.
    def host=( hostOrId )
      self.host_id = hostOrId.kind_of?(Host) ? hostOrId.id : hostOrId
      save
    end

    # Re-points the route at a new target object and persists it.
    def target=( newTarget )
      self.target_id = newTarget.id
      self.target_type = newTarget.type
      save
    end

    # Rebuilds the target object from the stored polymorphic pair.
    # Returns nil for an unknown target_type.
    def target
      case target_type
      when "proxy" then Proxy[target_id]
      when "dir" then Dir[target_id]
      when "handler" then Handler[target_id]
      end
    end
  end
end
|
ameuret/m2config
|
lib/m2config/server.rb
|
<reponame>ameuret/m2config<filename>lib/m2config/server.rb<gh_stars>0
module M2Config
  # Wraps Mongrel2's `server` table. Every column has a default below,
  # so `Server.new` with no arguments yields a complete, usable row.
  class Server < Sequel::Model(:server)
    ACCESS_LOG = '/logs/access.log'
    ERROR_LOG = '/logs/error.log'
    PID_FILE = '/run/mongrel2.pid'
    CONTROL_PORT = ''
    CHROOT = './'
    DEFAULT_HOST = 'localhost'
    NAME = 'main'
    BIND_ADDR = '0.0.0.0'
    PORT = 6767
    USE_SSL = 0
    one_to_many :hosts

    # Creates and immediately persists a server row, generating a fresh
    # UUID unless one is supplied and filling every missing field with
    # the class defaults above.
    def initialize( fields={} )
      fields[:uuid] ||= UUID.new.generate
      # Consistency fix: the first default was written as
      # M2Config::Server::ACCESS_LOG while all others used the bare
      # constant name; use the bare form uniformly.
      fields[:access_log] ||= ACCESS_LOG
      fields[:error_log] ||= ERROR_LOG
      fields[:pid_file] ||= PID_FILE
      fields[:chroot] ||= CHROOT
      fields[:default_host] ||= DEFAULT_HOST
      fields[:name] ||= NAME
      fields[:bind_addr] ||= BIND_ADDR
      fields[:port] ||= PORT
      fields[:use_ssl] ||= USE_SSL
      fields[:control_port] ||= CONTROL_PORT
      super fields, false
      save
    end

    # Guarded Sequel::Model.first: calling it on a database holding more
    # than one server is almost certainly a mistake, so raise instead of
    # silently picking one.
    def self.first
      raise "Careful ! You are calling Server.first on a database holding multiple servers" if
        ((Server.get {count(id)}) > 1)
      super
    end
  end
end
|
ameuret/m2config
|
spec/host_spec.rb
|
<gh_stars>0
require "env"

# Specs for M2Config::Host (CFG and per-example transaction rollback
# are provided by spec/env.rb).
describe M2Config::Host do
  before(:each) do
    @srv = CFG.add_server
  end
  describe "::new" do
    it "needs to know the domain name served" do
      M2Config::Host.new({matching:"example.com", name: "ex"})
      res = CFG.db[:host].first
      expect(res[:matching]).to eq("example.com")
    end
    it "can use the uuid of a server" do
      # :srvUuid is resolved to the server's id before the insert.
      host = M2Config::Host.new({matching:"example.com", name: "ex", srvUuid: @srv.uuid})
      res = CFG.db[:host].where(id: host.id).first
      expect(res[:server_id]).to eq(@srv.id)
      expect(res[:matching]).to eq("example.com")
    end
    it "can use a server instance" do
      host = M2Config::Host.new({matching:"example.com", name: "ex", srv: @srv})
      res = CFG.db[:host].where(id: host.id).first
      expect(res[:server_id]).to eq(@srv.id)
      expect(res[:matching]).to eq("example.com")
    end
    it "enforces mongrel2 constraint about nil name" do
      expect {
        M2Config::Host.new({matching:"example.com"})
      }.to raise_exception(ArgumentError, /name can not be nil/i)
    end
  end
  describe "#add_route" do
    it "activates a route (can be done using Route#host= too)" do
      host = M2Config::Host.new({matching:"example.com", name: "ex"})
      dirH = M2Config::Dir.new({base: "static/"})
      dirR = M2Config::Route.new({path:"/blog", target: dirH})
      host.add_route dirR
      res = CFG.db[:route].first
      expect(res[:host_id]).to eq(host.id)
    end
  end
  describe '#check_routes' do
    it 'returns false if some routes have identical paths' do
      # Two routes on the same host with the same path clash.
      host = M2Config::Host.new({matching:"example.com", name: "ex"})
      dir1 = M2Config::Dir.new({base: "static/"})
      dir2 = M2Config::Dir.new({base: "images/"})
      host.add_route M2Config::Route.new({path:"/blog1", target: dir1})
      host.add_route M2Config::Route.new({path:"/blog1", target: dir2})
      expect(host.check_routes).to be_falsey
    end
    it 'returns true if all routes have different paths' do
      host = M2Config::Host.new({matching:"example2.com", name: "ex"})
      dir1 = M2Config::Dir.new({base: "static/"})
      dir2 = M2Config::Dir.new({base: "images/"})
      r1 = M2Config::Route.new({path:"/blog3", target: dir1})
      r2 = M2Config::Route.new({path:"/images", target: dir2})
      host.add_route r1
      host.add_route r2
      expect(host.check_routes).to be_truthy
    end
    it 'withstands the idea of not having any routes' do # , {focus: true}
      host = M2Config::Host.new({matching:"example.com", name: "ex"})
      expect(host.check_routes).to be_truthy
    end
  end
end
|
ameuret/m2config
|
sample/full.rb
|
# Sample: a full Mongrel2 configuration built with m2config — one
# server, one host, a static directory route and a handler route.
require "m2config"
include M2Config

cfg = M2Config::Config.new
server = M2Config::Server.new
exComHost = M2Config::Host.new({matching:"example.com", name:"ex"})
# Static files under public/, served as text/html by default.
pubDir = M2Config::Dir.new({base:"public/",default_ctype: "text/html"})
pubRoute = M2Config::Route.new( {path:"/", target:pubDir} )
# ZeroMQ application handler mounted under /blog.
appHand = M2Config::Handler.new({ send_spec:"tcp://10.0.0.1:8989",
                                  recv_spec:"tcp://10.0.0.1:9898",
                                  send_ident: "dev.example.com ID" })
appRoute = M2Config::Route.new( {path:"/blog", target:appHand} )
exComHost.add_route appRoute
exComHost.add_route pubRoute
# Arbitrary settings go through Config#[]= (stored in the setting table).
cfg["extra"] = 64
server.add_host exComHost
|
ameuret/m2config
|
m2config.gemspec
|
# coding: utf-8
# Gem specification for m2config.
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'm2config/version'

Gem::Specification.new do |spec|
  spec.name = "m2config"
  spec.version = M2Config::VERSION
  spec.authors = ["<NAME>"]
  spec.email = ["<EMAIL>"]
  spec.description = %q{A library to easily manage a Mongrel2 configuration database}
  spec.summary = %q{Manage your Mongrel2 configuration database using handy model classes that map Servers, Hosts, Routes, Directories, Proxies, Handlers and Settings}
  spec.homepage = "https://github.com/ameuret/m2config"
  spec.license = "MIT"
  # Package everything tracked by git; test files are the spec/ tree.
  spec.files = `git ls-files`.split($/)
  spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
  spec.require_paths = ["lib"]
  spec.add_dependency "sqlite3"
  spec.add_dependency "sequel", '~> 3'
  spec.add_dependency "uuid"
  # mime-types pinned exactly — newer majors changed the API. TODO confirm.
  spec.add_dependency "mime-types", '2.4.3'
  spec.add_development_dependency "bundler", "~> 1.3"
  spec.add_development_dependency "rake"
  spec.required_ruby_version = '>= 1.9.0'
end
|
ameuret/m2config
|
spec/dir_spec.rb
|
<filename>spec/dir_spec.rb
require "env"

# Specs for M2Config::Dir (CFG and per-example transaction rollback are
# provided by spec/env.rb).
describe M2Config::Dir do
  describe "::new" do
    it "needs to know the base path to handle" do
      M2Config::Dir.new({base:"images/"})
      expect(CFG.db[:directory].first[:base]).to eq("images/")
    end
    it "defaults to application/octet-stream for the default content type" do
      M2Config::Dir.new({base:"images/"})
      expect(CFG.db[:directory].first[:default_ctype]).to eq("application/octet-stream")
    end
    it "defaults to index.html for the index file" do
      dir = M2Config::Dir.new({base:"/"})
      expect(CFG.db[:directory].first[:index_file]).to eq("index.html")
    end
    describe "helps you spot common mistakes" do
      # Base paths must be chroot-relative and slash-terminated.
      it "yells when the path contains a leading slash" do
        expect {
          M2Config::Dir.new({base:"/public"})
        }.to raise_exception(ArgumentError, /base directories are relative to chroot and must not start with a slash/i)
      end
      it "yells when the path does not end with a slash" do
        expect {
          M2Config::Dir.new({base:"public"})
        }.to raise_exception(ArgumentError, /base directories must end with a slash/i)
      end
      it "keeps calm with slashes inside the path" do
        expect { M2Config::Dir.new({base:"public/assets/"}) }.to_not raise_exception
      end
    end
  end
  describe '#type' do
    it 'returns its type' do
      expect(M2Config::Dir.new({base:"/"}).type).to eq("dir")
    end
  end
end
|
ameuret/m2config
|
spec/proxy_spec.rb
|
require "env"
M2Config::Config.new # Dummy call to ensure that model classes are required (cf. M2Config::new)

# Specs for M2Config::Proxy (CFG comes from spec/env.rb).
describe M2Config::Proxy do
  describe "::new" do
    it "needs an address and a port number" do
      M2Config::Proxy.new({addr:"legacy.local", port: 8080})
      res = CFG.db[:proxy].first
      expect(res[:addr]).to eq("legacy.local")
      expect(res[:port]).to eq(8080)
    end
  end
  describe '#type' do
    it 'returns its type' do
      # "proxy" is the polymorphic tag used by Route#target_type.
      expect(M2Config::Proxy.new({addr:"legacy.local", port: 8080}).type).to eq("proxy")
    end
  end
end
|
ameuret/m2config
|
spec/mimetype_spec.rb
|
require 'env'

# Specs for M2Config::MimeType, the "extension -> MIME type" table
# (CFG and per-example transaction rollback come from spec/env.rb).
describe M2Config::MimeType do
  describe '::new' do
    it 'reminds you that Mongrel2 wants the extension string to start with a .' do
      expect {
        M2Config::MimeType.new(extension:'bs', mimetype:'text/bullshit')
      }.to raise_exception(ArgumentError, /extension must start with a \./i)
    end
    it 'yells when you try to insert duplicate extensions' do
      # .css is already present: the table was populated at DB creation.
      expect {
        M2Config::MimeType.new(extension:'.css', mimetype:'text/better-css')
      }.to raise_exception(ArgumentError, /extension .css is already covered by text\/css type/i)
    end
    it 'allows you to force the redefinition of an existing extension' do
      # Not implemented yet.
      skip
      fail
    end
  end
  describe '::populate_table' do
    it 'fills up the "extension -> MIME type" mapping table (using the list from https://github.com/halostatue/mime-types)' do
      # Clear entire table
      CFG.db[:mimetype].delete
      M2Config::MimeType.populate_table()
      # M2Config::MimeType.populate_table(nil,ignoreDoubles=true)
      expect(M2Config::MimeType[extension:'.css'].mimetype).to eq('text/css')
    end
    it 'checks for doubles unless asked not to' do
      CFG.db[:mimetype].delete
      # Two types claiming .zed and no Mongrel2 preference => error.
      expect {
        M2Config::MimeType.populate_table([
          MIME::Type.new(['text/plain', 'zed']),
          MIME::Type.new(['text/complex', 'zed'])
        ])
      }.to raise_exception(ArgumentError,
                           /\.zed \(text\/complex\) has multiple content types but no Mongrel2 preference/i)
    end
    it 'accepts an array of MIME::Types if the whole list is too much' do
      CFG.db[:mimetype].delete
      M2Config::MimeType.populate_table [MIME::Type.new(['text/plain', 'zed'])]
      expect(M2Config::MimeType[extension:'.zed'].mimetype).to eq('text/plain')
      expect(M2Config::MimeType[extension:'.css']).to eq(nil)
    end
    it 'frowns when asked to populate a non-empty table' do
      # At this point the table has been filled up on creation
      expect {
        M2Config::MimeType.populate_table
      }.to raise_exception(RuntimeError, /table must be empty/i)
    end
  end
end
|
ameuret/m2config
|
spec/env.rb
|
<reponame>ameuret/m2config
# Shared spec environment: loads the library, creates the CFG database
# used by every spec file and isolates examples in transactions.
require "bundler/setup"
require "m2config" # rspec automatically adds ./lib to LOAD_PATH
require "sqlite3"
require "pp"
require "pry"
require "pry-nav"

# Database file names used across the specs.
DEFAULT_DB_NAME = "config.sqlite"
CUSTOM_DB_NAME = "custom.sqlite"
EXISTING_DB_NAME = "empty.sqlite"

RSpec.configure do |config|
  # Start every run from a fresh default database.
  File.delete DEFAULT_DB_NAME rescue nil
  # Shared config instance; ignoreDoubles skips the duplicate-resolution
  # pass of MimeType.populate_table while the MIME table is populated.
  CFG = M2Config::Config.new(DEFAULT_DB_NAME,{ignoreDoubles:true})
  # Isolate examples: each runs in a transaction that is rolled back.
  config.around(:each) do |example|
    CFG.db.transaction do
      example.call
      raise Sequel::Error::Rollback
    end
  end
end
|
ameuret/m2config
|
spec/handler_spec.rb
|
<gh_stars>0
# -*- coding: utf-8 -*-
require "env"

# Specs for M2Config::Handler (CFG and per-example transaction rollback
# come from spec/env.rb).
describe M2Config::Handler do
  describe "::new" do
    it "needs the ØMQ addresses and a send identifier" do
      M2Config::Handler.new({ send_spec:"tcp://10.0.0.1:8989",
                              recv_spec:"tcp://10.0.0.1:9898",
                              send_ident: "dev.example.com ID"})
      res = CFG.db[:handler].first
      expect(res[:send_spec]).to eq("tcp://10.0.0.1:8989")
      expect(res[:recv_spec]).to eq("tcp://10.0.0.1:9898")
      expect(res[:send_ident]).to eq("dev.example.com ID")
    end
    it "turns nil into empty string when recv_ident is not set" do
      M2Config::Handler.new({ send_spec:"tcp://10.0.0.1:8989",
                              recv_spec:"tcp://10.0.0.1:9898",
                              send_ident: "dev.example.com ID"})
      res = CFG.db[:handler].first
      expect(res[:recv_ident]).to be_empty
    end
    describe "helps you spot common mistakes" do
      it "yells when the addresses are the same" do
        # Identical send/recv endpoints are rejected at creation.
        expect do
          M2Config::Handler.new({send_spec:"tcp://10.0.0.1:8989", recv_spec:"tcp://10.0.0.1:8989", send_ident: "dev.example.com ID"})
        end.to raise_exception(ArgumentError, /send and receive endpoints can not be the same/i)
      end
    end
  end
  describe '#type' do
    it 'returns its type' do
      expect(M2Config::Handler.new({send_spec:"tcp://10.0.0.1:8988", recv_spec:"tcp://10.0.0.1:8989", send_ident: "dev.example.com ID"}).type).to eq("handler")
    end
  end
end
|
ameuret/m2config
|
spec/m2config_spec.rb
|
require "env"

# Tables the official Mongrel2 schema dump is expected to declare.
TABLES = %w(directory handler host log mimetype proxy route server setting statistic filter)

# Specs for the top-level M2Config::Config behavior (database creation,
# schema discovery, MIME population, settings).
describe M2Config do
  describe "::new" do
    it 'creates a default database when name absent' do
      expect(File).to exist DEFAULT_DB_NAME
    end
    it 'learns the DB schema from the official schema dump' do
      # `.each` instead of `for`: `for` leaks its loop variable into the
      # enclosing scope and is non-idiomatic Ruby.
      TABLES.each do |table|
        expect(M2Config::Config.tables).to include table
      end
    end
    it 'creates a valid database structure' do
      M2Config::Config.tables.each do |table|
        expect {CFG.db["SELECT * FROM #{table};"].first}.to_not raise_error
      end
    end
    it 'populates the MIME table with a nice set of mappings' do
      expect(M2Config::MimeType[extension:".html"].mimetype).to eq("text/html")
      expect(M2Config::MimeType[extension:".css"].mimetype).to eq("text/css")
    end
  end
  describe '#[]=' do
    it 'creates or modifies the value of a setting' do
      CFG["zeromq.threads"] = 8
      expect(CFG.db[:setting].where(key:"zeromq.threads").first[:value]).to eq("8")
    end
  end
end
|
ameuret/m2config
|
lib/m2config/host.rb
|
<reponame>ameuret/m2config
module M2Config
  # Wraps Mongrel2's `host` table: a named virtual host owning routes.
  class Host < Sequel::Model(:host)
    one_to_many :routes, {class: "M2Config::Route"}

    # Creates and immediately persists a host row. The owning server may
    # be given as a Server instance (:srv) or by uuid (:srvUuid); either
    # is resolved to the server_id column before the insert. A :name is
    # mandatory (Mongrel2 constraint).
    def initialize( fields )
      owner = resolveServer fields
      fields[:server_id] = owner.id if owner
      raise ArgumentError, "Name can not be nil" if fields[:name].nil?
      super fields, false
      save
    end

    # True when no two of this host's routes share the same path.
    def check_routes
      paths = routes(true).map(&:path)  # (true) forces a reload of the association
      paths.uniq.length == paths.length
    end

    private

    # Pulls the :srv / :srvUuid convenience keys out of +fields+ and
    # returns the corresponding Server, or nil when neither is given.
    def resolveServer( fields )
      if fields[:srv]
        fields.delete :srv
      elsif fields[:srvUuid]
        Server.find(uuid: fields.delete(:srvUuid))
      end
    end
  end
end
|
ameuret/m2config
|
lib/m2config/proxy.rb
|
<reponame>ameuret/m2config
module M2Config
  # Wraps Mongrel2's `proxy` table: a reverse-proxy target (addr, port).
  class Proxy < Sequel::Model(:proxy)
    # Creates and immediately persists a proxy row.
    def initialize( fields )
      super fields, false
      save
    end

    # Polymorphic tag stored in Route#target_type.
    def type
      "proxy"
    end
  end
end
|
ameuret/m2config
|
spec/route_spec.rb
|
<reponame>ameuret/m2config<gh_stars>0
require "env"

# Specs for M2Config::Route (CFG and per-example transaction rollback
# come from spec/env.rb).
describe M2Config::Route do
  describe "::new" do
    it "needs to know the path pattern and the target handler" do
      dirH = M2Config::Dir.new({base: "static/"})
      M2Config::Route.new({path:"/blog", target: dirH})
      res = CFG.db[:route].first
      # The target object is flattened into the polymorphic pair.
      expect(res[:path]).to eq("/blog")
      expect(res[:target_id]).to eq(dirH.id)
      expect(res[:target_type]).to eq(dirH.type)
    end
  end
  describe "::elect!" do
    it "makes a route the exclusive matcher for its path by deleting all other matching routes (on the same host)" do
      host = M2Config::Host.new({name:"main",matching:"actime.biz"})
      dirH = M2Config::Dir.new({base: "static/"})
      dir2 = M2Config::Dir.new({base: "ManceRayder/"})
      # Three routes compete for /king; electing r2 must leave only it.
      r1 = M2Config::Route.new({path:"/king", target: dirH, host: host})
      r2 = M2Config::Route.new({path:"/king", target: dir2, host: host})
      r3 = M2Config::Route.new({path:"/king", target: dirH, host: host})
      M2Config::Route.elect!(r2)
      expect(host.check_routes).to be_truthy
      king = M2Config::Route.where(path: "/king").first
      expect(king.target.base).to eq("ManceRayder/")
    end
    it "leaves routes belonging to a different host untouched" do
      host = M2Config::Host.new({name:"main",matching:"actime.biz"})
      host2 = M2Config::Host.new({name:"secondary",matching:"nameforge.net"})
      dirH = M2Config::Dir.new({base: "static/"})
      dir2 = M2Config::Dir.new({base: "ManceRayder/"})
      r1 = M2Config::Route.new({path:"/king", target: dirH, host: host})
      r2 = M2Config::Route.new({path:"/king", target: dir2, host: host})
      # r3 lives on host2 and must survive the election on host.
      r3 = M2Config::Route.new({path:"/king", target: dirH, host: host2})
      M2Config::Route.elect!(r2)
      expect(host.check_routes).to be_truthy
      king = M2Config::Route.where(path: "/king").first
      expect(king.target.base).to eq("ManceRayder/")
      onOtherHost = M2Config::Route.where(path:"/king", target_id: dirH.id, host: host2).first
      expect(onOtherHost).not_to be_nil
    end
  end
  describe "#host=" do
    it "can be used if the host is not known at creation" do
      host = M2Config::Host.new({matching:"example.com", name: "ex"})
      dirH = M2Config::Dir.new({base: "static/"})
      r = M2Config::Route.new({path:"/blog", target: dirH})
      # A bare id is accepted as well as a Host instance.
      r.host = host.id
      res = CFG.db[:route].first
      expect(res[:host_id]).to eq(host.id)
    end
    it "can take a Host instance" do
      host = M2Config::Host.new({matching:"example.com", name: "ex"})
      dirH = M2Config::Dir.new({base: "static/"})
      r = M2Config::Route.new({path:"/blog", target: dirH})
      r.host = host
      res = CFG.db[:route].first
      expect(res[:host_id]).to eq(host.id)
    end
  end
  describe '#host' do #
    it 'returns the associated Host object' do
      dirH = M2Config::Dir.new({base: "static/"})
      r = M2Config::Route.new({path:"/blog", target: dirH})
      r.host = M2Config::Host.new({matching:"example.com", name: "ex"})
      expect(r.host.matching).to eq("example.com")
    end
  end
  describe '#target=' do
    it 'reassigns the target object' do
      dirH = M2Config::Dir.new({base: "static/"})
      r = M2Config::Route.new({path:"/blog", target: dirH})
      newTarget = M2Config::Proxy.new({addr:"127.0.0.1", port: 15970})
      r.target = newTarget
      res = CFG.db[:route].first
      expect(res[:target_id]).to eq(newTarget.id)
      expect(res[:target_type]).to eq(newTarget.type)
    end
  end
  describe '#target' do
    it 'returns the Target object' do
      dirH = M2Config::Dir.new({base: "static/"})
      r = M2Config::Route.new({path:"/blog", target: dirH})
      expect(r.target.base).to eq dirH.base
    end
  end
end
|
ameuret/m2config
|
lib/m2config/setting.rb
|
<reponame>ameuret/m2config
module M2Config
  # Wraps Mongrel2's `setting` table (plain key/value pairs).
  class Setting < Sequel::Model(:setting)
    plugin :validation_helpers

    # Upserts a setting: updates the existing row when +key+ is already
    # present, otherwise inserts a new one. Key and value are both
    # stored as strings.
    #
    # NOTE(review): the `return s` / `return self` have no effect on
    # callers — `Setting.new` always returns the freshly allocated
    # instance, never the value returned by #initialize. The DB update
    # itself still happens either way.
    def initialize( key, value )
      if s=Setting[key:key]
        s.update({value:value})
        return s
      else
        fields = {}
        fields[:key] = key.to_s
        fields[:value] = value.to_s
        super(fields, false)
        save
        return self
      end
    end
  end
end
|
ameuret/m2config
|
lib/m2config.rb
|
<filename>lib/m2config.rb<gh_stars>0
require "sequel"
require "uuid"

module M2Config
  # Entry point of the library: opens (or creates) a Mongrel2 SQLite
  # configuration database and binds the model classes to it.
  class Config
    DEFAULT_CONFIG = "config.sqlite"
    # Official Mongrel2 schema dump, shipped alongside this file.
    SCHEMA = File.read("#{File.dirname __FILE__}/m2config/schema.sql")

    attr_reader :db

    @@foundTables = []

    # Returns the table names declared in SCHEMA (memoized).
    def self.tables
      return @@foundTables unless @@foundTables.empty?
      # scan collects every CREATE TABLE name in one pass (equivalent to
      # the previous line-by-line regexp loop, but clearer).
      @@foundTables = SCHEMA.scan(/CREATE TABLE (\w+)/).flatten
    end

    # Connects to (creating if absent) the SQLite database +fileName+.
    # On creation the schema is applied and the MIME table populated;
    # options[:ignoreDoubles] is forwarded to MimeType.populate_table.
    def initialize(fileName = DEFAULT_CONFIG, options={})
      @fileName = fileName
      # File.exists? was removed in Ruby 3.2; File.exist? is the
      # supported spelling.
      creating = ! (File.exist? @fileName)
      @db = Sequel.connect "sqlite://#{@fileName}"
      @db.run SCHEMA if creating
      Sequel::Model.db = @db
      # Model classes bind to Sequel::Model.db when first required, so
      # these requires must happen after the connection is established.
      require "m2config/server"
      require "m2config/host"
      require "m2config/dir"
      require "m2config/route"
      require "m2config/proxy"
      require "m2config/handler"
      require "m2config/setting"
      require "m2config/mimetype"
      M2Config::MimeType.populate_table(nil,options[:ignoreDoubles]) if creating
    end

    # Convenience wrapper: creates (and persists) a new Server.
    def add_server( settings = {} )
      Server.new settings
    end

    # Creates or updates a Mongrel2 setting (see Setting#initialize).
    def []=( k, v )
      Setting.new k, v
    end
  end
end
|
ameuret/m2config
|
lib/m2config/mimetype.rb
|
require "mime/types"
require "yaml"

module M2Config
  # Wraps Mongrel2's `mimetype` table: the "file extension -> MIME type"
  # mapping used when serving static content.
  class MimeType < Sequel::Model(:mimetype)
    # Mongrel2's preferred type for extensions claimed by several types.
    MONGREL2_PREFS = YAML.load File.read "#{File.dirname(__FILE__)}/mongrel2.mime.yml"
    # NOTE(review): loads the very same file as MONGREL2_PREFS and is
    # never referenced in this file — looks like a leftover; confirm
    # before removing.
    RECESSIVE = YAML.load File.read "#{File.dirname(__FILE__)}/mongrel2.mime.yml"
    # Extensions present more than once in the table.
    SQL_FIND_DUPLICATES = <<-SQL
select mimetype, extension, count(*)
from mimetype
group by extension
having count(*)>1
    SQL

    # Bulk-fills the (empty) mimetype table from +types+ (defaults to
    # the full MIME::Types registry). Obsolete/superseded/non-canonical
    # types are skipped. When ignoreDoubles is false, extensions mapped
    # to several types are resolved via MONGREL2_PREFS (or raise).
    def self.populate_table(types=nil, ignoreDoubles=false)
      raise RuntimeError, "Table must be empty" if db[:mimetype].count > 0
      types ||= MIME::Types
      rows = [] # Will collect ext<->type rows
      types.each {
        |type|
        next if not_dominant?(type)
        type.extensions.each {
          |ext|
          # Mongrel2 stores extensions with their leading dot.
          ext = "."+ext
          clashingType = M2Config::MimeType[extension:ext]
          raise ArgumentError, "extension #{ext} is already covered by #{clashingType.mimetype} type" if clashingType
          rows << [type.content_type, ext]
        }
      }
      # Single multi-row insert inside one transaction for speed.
      db.transaction {
        db[:mimetype].import([:mimetype, :extension], rows)
      }
      remove_duplicates unless ignoreDoubles
    end

    # Is it reasonable to ignore this type ?
    def self.not_dominant?(mtype)
      mtype.obsolete? || superceded?(mtype) || !simplest?(mtype)
    end

    # A type whose registry docs say "use X instead" is superseded.
    def self.superceded?(mtype)
      mtype.docs =~ /instead/
    end

    # Canonical form only (content_type equal to its simplified form).
    def self.simplest?(mtype)
      mtype.content_type == mtype.simplified
    end

    # Resolves extensions mapped to several types: keeps Mongrel2's
    # preferred type when MONGREL2_PREFS knows one, raises otherwise.
    def self.remove_duplicates
      randomChoices = []
      db[SQL_FIND_DUPLICATES].all.each {
        |r|
        ext = r[:extension]
        preferred = MONGREL2_PREFS[ext]
        if preferred
          db[:mimetype].where(extension:ext).delete
          db[:mimetype].insert(extension:ext, mimetype:preferred)
        else
          raise ArgumentError, "#{ext} (#{r[:mimetype]}) has multiple content types but no Mongrel2 preference"
        end
      }
      # Sanity check: the resolution pass must leave no duplicates.
      raise RuntimeError, "Still duplicates after removing duplicates!" if
        db[SQL_FIND_DUPLICATES].all.size > 0
    end

    # Creates and persists a single mapping; the extension must start
    # with a dot and must not already be covered.
    def initialize(fields)
      raise ArgumentError, "Extension must start with a ." unless fields[:extension] =~ /^\./
      type = M2Config::MimeType[extension:fields[:extension]]
      raise ArgumentError, "extension #{fields[:extension]} is already covered by #{type.mimetype} type" if type
      super(fields, false)
      save
    end
  end
end
|
dorianlac/dnsdynamic
|
dnsdynamic.rb
|
<filename>dnsdynamic.rb<gh_stars>0
#!/usr/bin/env ruby
# Dynamic-DNS updater for dnsdynamic.org.
#example -> ruby dnsdynamic.rb username password domain timeout
require 'net/http'
require 'net/https'

username = ARGV[0]
password = ARGV[1]
domain = ARGV[2]
timeout = ARGV[3]

# Pushes +currentIP+ as the new address of <domain>.dnsdynamic.com via
# the dnsdynamic.org HTTPS API (HTTP basic auth) and appends the
# timestamped API response to ddns.log.
def change(username, password, domain, currentIP)
  http = Net::HTTP.new('dnsdynamic.org',443)
  http.use_ssl = true
  req = Net::HTTP::Get.new('https://www.dnsdynamic.org/api/?hostname='+domain+'.dnsdynamic.com&myip='+currentIP)
  req.basic_auth username, password
  response = http.request(req)
  time1 = Time.new
  File.open("ddns.log", 'a') {|f| f.write(time1.inspect+': '+response.body) }
end

# Asks myip.dnsdynamic.org for our current public IP and returns it as
# a string (empty string if the response had no body).
def getCurrentIP()
  currentIP = ""
  http1 = Net::HTTP.new('myip.dnsdynamic.org',80)
  req1 = Net::HTTP::Get.new('http://myip.dnsdynamic.org')
  http1.request(req1) { |response|
    # Append each chunk: the old code assigned (currentIP = str), which
    # kept only the last chunk of a multi-chunk body.
    response.read_body do |str|
      currentIP << str
    end
  }
  return currentIP
end

# Re-publish the current IP every <timeout> minutes, forever.
loop do
  change(username,password,domain,getCurrentIP())
  sleep(60* Integer(timeout))
end
|
mrdaios/DevOnPods
|
AwesomeProject.podspec
|
<gh_stars>10-100
# CocoaPods spec (Ruby DSL): wraps prebuilt Carthage frameworks as
# subspecs so they can be consumed through CocoaPods.
Pod::Spec.new do |s|
  s.name = "AwesomeProject"
  s.version = "1"
  s.summary = "Use Pods Demo"
  s.homepage = "https://github.com/DianQK/DevOnPods"
  s.license = { :type => "MIT", :file => "LICENSE" }
  s.author = { "DianQK" => "<EMAIL>" }
  s.source = { :git => "https://github.com/DianQK/DevOnPods.git",
               :tag => s.version.to_s }
  s.ios.deployment_target = "8.0"
  # s.vendored_frameworks = ["Pods/Carthage/Build/iOS/Then.framework", "Pods/Carthage/Build/iOS/SwiftyJSON.framework"]
  # One subspec per prebuilt framework from Carthage's build output.
  ['Then', 'SwiftyJSON', 'AwesomeModule'].each do |name|
    s.subspec name do |sp|
      sp.vendored_frameworks = "Pods/Carthage/Build/iOS/#{name}.framework"
    end
  end
end
# Reference podspec kept for comparison (not evaluated):
# Pod::Spec.new do |s|
# s.name = "Then"
# s.version = "2.1.0"
# s.summary = "Super sweet syntactic sugar for Swift initializers."
# s.homepage = "https://github.com/devxoul/Then"
# s.license = { :type => "MIT", :file => "LICENSE" }
# s.author = { "<NAME>" => "<EMAIL>" }
# s.source = { :git => "https://github.com/devxoul/Then.git",
# :tag => s.version.to_s }
# s.source_files = "Sources/*.swift"
# s.requires_arc = true
#
# s.ios.deployment_target = "8.0"
# s.osx.deployment_target = "10.9"
# s.tvos.deployment_target = "9.0"
# end
|
LegionIO/lex-elasticsearch
|
spec/legion/extensions/elasticsearch_spec.rb
|
<filename>spec/legion/extensions/elasticsearch_spec.rb
# Smoke spec: the extension module must expose a VERSION constant.
RSpec.describe Legion::Extensions::Elasticsearch do
  it 'has a version number' do
    expect(Legion::Extensions::Elasticsearch::VERSION).not_to be nil
  end
end
|
LegionIO/lex-elasticsearch
|
lib/legion/extensions/elasticsearch.rb
|
require 'legion/extensions/elasticsearch/version'

module Legion
  module Extensions
    # Legion extension namespace for Elasticsearch.
    module Elasticsearch
      # Mix in the Legion core extension helpers when available; the
      # const_defined? guard lets this file load even when the Legion
      # framework core is absent (e.g. in isolated unit tests).
      extend Legion::Extensions::Core if Legion::Extensions.const_defined? :Core
    end
  end
end
|
proinsias/copier-python-template
|
copier-python-template/.mdl.rb
|
# frozen_string_literal: true

# Configuration for mdl (Markdown lint): start from every rule, then
# tweak individual ones below.
all
exclude_rule 'MD004' # disabled entirely
rule 'MD007', indent: 4                # list indentation of 4 spaces
rule 'MD013', line_length: 120         # allow lines up to 120 chars
rule 'MD026', punctuation: '.,:;'      # punctuation disallowed at end of headers
rule 'MD029', style: 'ordered'         # ordered-list numbering style
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws/list.rb
|
require 'puppet/cloudpack'
require 'puppet/face/node_aws'

# Puppet Face action: `puppet node_aws list`.
Puppet::Face.define :node_aws, '0.0.1' do
  action :list do
    summary 'List AWS EC2 machine instances.'
    description <<-'EOT'
This action obtains a list of instances from the cloud provider and
displays them on the console output. For EC2 instances, only the instances in
a specific region are provided.
    EOT
    # Registers the shared --region etc. options on this action.
    Puppet::CloudPack.add_list_options(self)
    when_invoked do |options|
      Puppet::CloudPack.list(options)
    end
    # Console rendering: one "id:" header per instance followed by its
    # sorted "field: value" lines.
    when_rendering :console do |value|
      value.collect do |id,status_hash|
        "#{id}:\n" + status_hash.collect do |field, val|
          " #{field}: #{val}"
        end.sort.join("\n")
      end.sort.join("\n")
    end
    returns 'Array of attribute hashes containing information about each EC2 instance.'
    examples <<-'EOT'
List every instance in the US East region:
$ puppet node_aws list --region=us-east-1
i-e8e04588:
created_at: Tue Sep 13 01:21:16 UTC 2011
dns_name: ec2-184-72-85-208.compute-1.amazonaws.com
id: i-e8e04588
state: running
    EOT
  end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws/create.rb
|
require 'puppet/cloudpack'
require 'puppet/face/node_aws'

# Puppet Face action: `puppet node_aws create`.
Puppet::Face.define :node_aws, '0.0.1' do
  action :create do
    summary 'Create a new EC2 machine instance.'
    description <<-EOT
This action launches a new Amazon EC2 instance and returns the public
DNS name suitable for SSH access.
A newly created system may not be immediately ready after launch while
it boots. You can use the `fingerprint` action to wait for the system to
become ready after launch.
If creation of the instance fails, Puppet will automatically clean up
after itself and tear down the instance.
    EOT
    # Registers the shared creation options (image, type, keypair, ...).
    Puppet::CloudPack.add_create_options(self)
    when_invoked do |options|
      Puppet::CloudPack.create(options)
    end
  end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node/install.rb
|
<filename>lib/puppet/face/node/install.rb
require 'puppet/cloudpack'

# Puppet Face action: `puppet node install <hostname_or_ip>`.
Puppet::Face.define :node, '0.0.1' do
  action :install do
    summary 'Install Puppet on a running node.'
    description <<-EOT
Installs Puppet on an existing node at <hostname_or_ip>. It uses scp to
copy installation requirements to the machine, and ssh to run the
installation commands remotely.
This action can be used on both physical and virtual machines.
    EOT
    arguments '<hostname_or_ip>'
    # Registers the shared installation options on this action.
    Puppet::CloudPack.add_install_options(self)
    # Console rendering: aligned, sorted "key: value" lines.
    when_rendering :console do |return_value|
      return_value.keys.sort.collect { |k| "%20.20s: %-20s" % [k, return_value[k]] }.join("\n")
    end
    when_invoked do |server, options|
      Puppet::CloudPack.install(server, options)
    end
  end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws/fingerprint.rb
|
require 'puppet/cloudpack'
require 'puppet/face/node_aws'

# Puppet Face action: `puppet node_aws fingerprint <instance_name>`.
Puppet::Face.define :node_aws, '0.0.1' do
  action :fingerprint do
    summary 'Make a best effort to securely obtain the SSH host key fingerprint.'
    description <<-EOT
This action attempts to retrieve a host key fingerprint by using the EC2
API to search the console output. This provides a secure way to retrieve
the fingerprint from an EC2 instance. You should run the `fingerprint`
action immediately after creating an instance, as you wait for it to
finish booting.
This action can only retrieve a fingerprint if the instance's original
image was configured to print the fingerprint to the system console.
Note that many machine images do not print the fingerprint to the
console. If this action is unable to find a fingerprint, it will display
a warning.
In either case, if this command returns without an error, then the
instance being checked is ready for use.
    EOT
    arguments '<instance_name>'
    # Registers the shared fingerprint options on this action.
    Puppet::CloudPack.add_fingerprint_options(self)
    when_invoked do |server, options|
      Puppet::CloudPack.fingerprint(server, options)
    end
  end
end
|
olindata/puppetlabs-cloud_provisioner
|
acceptance_tests/install.rb
|
<reponame>olindata/puppetlabs-cloud_provisioner
#
# This uses the test harness to install
# and configure cloud provisioner
#
# This can be removed once the installation
# of cloud provisioner is added to PE
#
# on every node, we need to install the fog gem
test_name "Install Puppet Cloud Provisioner"

# find the controller node (hosts.find replaces the manual nil + each loop)
controller = hosts.find { |host| host['roles'].include? 'controller' }

# `&&` instead of `and`: `and` has lower precedence than assignment and
# is reserved for control flow, not boolean logic.
skip_test 'test requires a master and controller' unless controller && master

step 'give the controller access to sign certs on the master'
cert_auth ="
path /certificate_status
method save
auth yes
allow #{controller.to_s}
"
# copy auth rules to the top of the file
auth_path='/etc/puppetlabs/puppet/auth.conf'
on master, "mv #{auth_path}{,.save}"
on master, "echo '#{cert_auth}' > #{auth_path};cat #{auth_path}.save >> #{auth_path}"

# File.exists? was removed in Ruby 3.2; File.exist? is the supported form.
skip_test 'cannot find fog credentials' unless File.exist?(File.expand_path '~/.fog')

# install the latest version of fog, this should use the source version
# the vspere support will require master
step 'install latest version of fog'
on controller, 'yum install -y libxml2 libxml2-devel libxslt libxslt-devel'
on controller, '/opt/puppet/bin/gem install guid --no-rdoc --no-ri'
# the latest version of net-ssh causes fog to fail to install
on controller, '/opt/puppet/bin/gem install net-ssh -v 2.1.4 --no-ri --no-rdoc'
on controller, '/opt/puppet/bin/gem install fog --no-ri --no-rdoc'
on controller, 'cd /opt/puppet;git clone http://github.com/puppetlabs/puppetlabs-cloud-provisioner.git'
# assumes that you have bash installed
on controller, 'echo "export RUBYLIB=/opt/puppet/puppetlabs-cloud-provisioner/lib/:$RUBYLIB" >> /root/.bashrc'

step 'provide test machine ec2 credentials. Be warned, your credientials located at ~/.fog are being copied to the test machine at /root/.fog.'
scp_to controller, File.expand_path('~/.fog'), '/root/.fog'

step 'test that fog can connect to ec2 with provided credentials'
# this is failing b/c of net/ssh mismatch?
# sync clocks so that the EC2 connection will work
on controller, 'ntpdate pool.ntp.org'
on controller, '/opt/puppet/bin/ruby -rubygems -e \'require "fog"\' -e \'puts Fog::Compute.new(:provider => "AWS").servers.length >= 0\''
|
olindata/puppetlabs-cloud_provisioner
|
spec/unit/puppet/face/node_aws/fingerprint_spec.rb
|
<filename>spec/unit/puppet/face/node_aws/fingerprint_spec.rb
require 'spec_helper'
require 'puppet/cloudpack'

# Specs for the node_aws `fingerprint` action, run against Fog's AWS mock.
describe Puppet::Face[:node_aws, :current] do
  before :all do
    # Seed the mock with an image and a keypair the action can resolve.
    data = Fog::Compute::AWS::Mock.data['us-east-1'][Fog.credentials[:aws_access_key_id]]
    data[:images]['ami-12345'] = { 'imageId' => 'ami-12345' }
    data[:key_pairs]['some_keypair'] = { 'keyName' => 'some_keypair' }
  end
  before :each do
    @options = {
      :region => 'us-east-1',
    }
  end
  describe 'option validation' do
    describe '(region)' do
      it "should not require a region name" do
        @options.delete(:region)
        # JJM This is absolutely not ideal, but I cannot for the life of me
        # figure out how to effectively deal with all of the create_connection
        # method calls in the option validation code.
        # The stub raises with the region it saw, so the assertion below
        # checks which default region was used.
        Puppet::CloudPack.stubs(:create_connection).with() do |options|
          raise(Exception, "region:#{options[:region]}")
        end
        expect { subject.fingerprint(@options) }.to raise_error Exception, 'region:us-east-1'
      end
      it 'should validate the region' do
        pending "This test does not pass because there are no required options for the action. Pending proper handling of option defaults in the Faces API."
        @options[:region] = 'mars-east-100'
        expect { subject.fingerprint(@options) }.to raise_error ArgumentError, /one of/
      end
    end
  end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_gce.rb
|
<reponame>olindata/puppetlabs-cloud_provisioner
require 'puppet/face'
require 'pathname'
require 'puppet/cloudpack/gce'
Puppet::Face.define(:node_gce, '1.0.0') do
copyright "Puppet Labs", 2013
license "Apache 2 license; see COPYING"
summary "View and manage Google Compute nodes."
description <<-'EOT'
This subcommand provides a command line interface to manage Google Compute
machine instances. We support creation of instances, shutdown of instances
and basic queries for Google Compute instances.
EOT
# Registers the local client with the Google Cloud API by discovering the
# GCE service; returns true on success (discovery raises otherwise).
action :register do
summary 'Register your Cloud Provisioner GCE client with Google Cloud'
description <<-EOT
Register your Cloud Provisioner GCE client with Google Cloud.
In order for the GCE client to operate, it needs to establish a secure
trust relationship with the Google Cloud API, and the project you are
working with.
This action captures the registration process, and stores the secret
data required to authenticate you. It will open a page in your web
browser to access the requisite authentication data.
EOT
arguments 'CLIENT_ID CLIENT_SECRET'
when_invoked do |client_id, client_secret, options|
# Lazy-load the API client so the face can be listed without the gem deps.
require 'puppet/google_api'
Puppet::GoogleAPI.new(client_id, client_secret).discover('compute', 'v1beta15') or
raise "unable to discover the GCE v1beta15 API"
# Explicit true: discovery's return value is not meaningful to callers.
true
end
when_rendering :console do |result|
if result
'Registration was successful, and the GCE API is available'
else
'Registration failed, or the GCE API was not available'
end
end
end
# Lists instances for one zone (if --zone given) or aggregated across all
# zones. The aggregated form returns a Hash of zone => instances, which the
# console renderer formats with per-zone headers.
action :list do
summary 'List GCE compute instances'
description <<-EOT
List GCE compute instances.
This provides a list of GCE compute instances for the selected project.
EOT
Puppet::CloudPack::GCE.options(self, :project)
# We can't have a default value for zone here, unlike every other
# operation that wants one.
Puppet::CloudPack::GCE.add_zone(self, false)
when_invoked do |options|
require 'puppet/google_api'
api = Puppet::GoogleAPI.new
if options[:zone]
api.compute.instances.list(options[:project], options[:zone])
else
api.compute.instances.aggregated_list(options[:project])
end
end
when_rendering :console do |output|
# Hash => aggregated_list result keyed by zone; Array => single-zone list.
if output.is_a? Hash
output.map do |key, value|
if value.empty?
"#### zone: #{key}\n<no instances in zone>\n"
else
"#### zone: #{key}\n" + value.map(&:to_s).join("\n\n") + "\n"
end
end.join("\n")
else
output.join("\n\n")
end
end
end
# Deletes a GCE instance. The API call is asynchronous; --wait (on by
# default, see GCE.add_wait) blocks until the operation completes.
action :delete do
summary 'delete an existing GCE compute instance'
description <<-EOT
Delete an existing GCE compute instance.
This starts the process of deleting the instance, which happens
in the background, and optionally waits for completion.
EOT
arguments '<name>'
Puppet::CloudPack::GCE.options(self, :project, :zone, :wait)
when_invoked do |name, options|
require 'puppet/google_api'
api = Puppet::GoogleAPI.new
# Returns the GCE operation object; rendering inspects its status/errors.
api.compute.instances.delete(options[:project], options[:zone], name, options)
end
when_rendering :console do |result|
if result.error
# @todo danielp 2013-09-17: untested
result.error.errors.each do |msg|
Puppet.err(msg.message || msg.code)
end
# Return a human-readable summary, matching the :create action's error
# branch (previously this branch rendered the raw errors array).
"Deleting the VM failed"
else
(result.warnings || []).each do |msg|
Puppet.warning(msg.message || msg.code)
end
"Deleting the VM is #{result.status.downcase}"
end
end
end
# Manages user accounts/SSH keys on an instance by editing the `sshKey`
# metadata entry, whose format is one "username:public-key" pair per line.
action :user do
summary 'Manage user login accounts and SSH keys on an instance'
description <<-EOT
Manage user login accounts and SSH keys on an instance.
This operates by modifying the instance `sshKey` metadata value,
which contains a list of user accounts and SSH key data. The
Google supplied images use this to sync active accounts on the
instances.
The sync process runs once a minute, so there is a potential for
some delay between our update and the change being reflected on the
machine in production.
Also, notably, this may no longer work on custom images: if you don't
include the Google sync process, this will "succeed" in the sense that
the metadata will be changed, but nothing will happen on the target
instance.
EOT
Puppet::CloudPack::GCE.options(self, :project, :zone)
arguments '<instance> ( remove <user> | set <user> <key> )'
when_invoked do |*args|
require 'puppet/google_api'
api = Puppet::GoogleAPI.new
# destructure our arguments nicely; wish this were doable in the method
# arguments, but sadly ... not so. :/
options = args.pop
name, action, user, key, *bad = args
name or raise "you must give the instance name to modify"
user or raise "you must tell me which user to act on"
bad.empty? or raise "unexpected trailing arguments #{bad.join(', ')}"
# Fetch the existing metadata, by way of fetching the entire instance.
node = api.compute.instances.get(options[:project], options[:zone], name) or
raise "unable to find instance #{name} in #{options[:project]} #{options[:zone]}"
metadata = Hash[node.metadata.items.map {|i| [i.key, i.value]}]
# Parse "user:key" lines into a Hash. NOTE(review): split(':') has no
# limit, so a key containing ':' would be truncated — confirm key format.
if ssh = metadata['sshKey']
ssh = Hash[metadata['sshKey'].split("\n").map {|s| s.split(':') }]
else
ssh = {}
end
case action
when 'remove'
key and raise "unexpected key argument when removing a user"
ssh.delete(user)
when 'set'
key or raise "the key must be supplied when setting a user key"
ssh[user] = key
else
raise "I don't know how to '#{action}' a user, sorry"
end
# ...and now set that modified data back, serialized as the inverse of the
# parse above: one "user:key" entry per line. (This line was corrupted in
# the source — `ssh.<KEY>` — and has been reconstructed.)
metadata['sshKey'] = ssh.map {|u, k| "#{u}:#{k}" }.join("\n")
api.compute.instances.set_metadata(
options[:project], options[:zone], name,
node.metadata.fingerprint, metadata, options)
end
when_rendering :console do |result|
"Updating the ssh key metadata is #{result.status.downcase}"
end
end
# Creates a GCE instance of the given machine type. The API call is
# asynchronous; the returned operation's status/errors drive rendering.
action :create do
summary 'create a new GCE compute instance'
description <<-EOT
Create a new GCE compute instance.
This starts the process of creating a new instance, which happens
in the background, and optionally waits for completion.
EOT
arguments '<name> <type>'
Puppet::CloudPack::GCE.options(self, :project, :zone)
Puppet::CloudPack::GCE.options(self, :image, :login)
# @todo danielp 2013-09-16: we should support network configuration, but
# for now ... we don't. Sorry. Best of luck.
when_invoked do |name, type, options|
require 'puppet/google_api'
api = Puppet::GoogleAPI.new
api.compute.instances.create(options[:project], options[:zone], name, type, options)
end
when_rendering :console do |result|
if result.error
# @todo danielp 2013-09-17: untested
result.error.errors.each do |msg|
Puppet.err(msg.message || msg.code)
end
"Creating the VM failed"
else
(result.warnings || []).each do |msg|
Puppet.warning(msg.message || msg.code)
end
"Creating the VM is #{result.status.downcase}"
end
end
end
# End-to-end provisioning: create the VM synchronously, find its public
# (NAT) address, then hand off to CloudPack.init to install Puppet over SSH.
action :bootstrap do
summary 'Create a new GCE VM, then install Puppet via SSH.'
description <<-EOT
Create a new GCE VM, and then install Puppet via SSH.
This wraps up the process from end to end -- creating the new VM,
configuring it, getting Puppet installed, and finally delivering
it more or less production ready.
EOT
arguments '<name> <type>'
Puppet::CloudPack::GCE.options(self, :project, :zone)
Puppet::CloudPack::GCE.options(self, :image, :login)
# These are the non-AWS options used for installation.
Puppet::CloudPack.add_payload_options(self)
Puppet::CloudPack.add_classify_options(self)
when_invoked do |name, type, options|
require 'puppet/google_api'
api = Puppet::GoogleAPI.new
# Because we are performing synchronous operations, we deliberately want
# to wait for each of them to complete before proceeding.
options.merge!(wait: true)
api.compute.instances.create(options[:project], options[:zone], name, type, options)
# Figure out the host details, such as the public address. Hope we have
# one, or this is going to be a short trip. ;)
# NOTE(review): `first` is nil when no interface has a NAT IP, so the
# following `.nat_ip` would raise NoMethodError — confirm this is intended.
instance = api.compute.instances.get(options[:project], options[:zone], name)
host = instance.network_interfaces.
map {|iface| iface.access_configs }.
flatten.
select {|config| config.nat_ip }.
first.nat_ip
# Next, install Puppet on the machine.
Puppet::CloudPack.init(host, options)
end
end
# Replaces the current process with an ssh session to the instance's public
# NAT address (host-key checking disabled, agent forwarding enabled).
action :ssh do
summary 'SSH to a GCE VM.'
arguments '<name>'
Puppet::CloudPack::GCE.options(self, :project, :zone, :login)
when_invoked do |name, options|
require 'puppet/google_api'
api = Puppet::GoogleAPI.new
instance = api.compute.instances.get(options[:project], options[:zone], name)
# NOTE(review): as in :bootstrap, `first` is nil when no NAT IP exists and
# `.nat_ip` would raise NoMethodError — confirm desired behavior.
host = instance.network_interfaces.
map {|iface| iface.access_configs }.
flatten.
select {|config| config.nat_ip }.
first.nat_ip
ssh = %w[ssh -o UserKnownHostsFile=/dev/null -o CheckHostIP=no
-o StrictHostKeyChecking=no -A -p 22]
# Add the specific SSH key, if the user gives us one.
options[:key] and ssh += ['-i', options[:key]]
# Build the target -- user ID if given, and target address
target = options[:login] ? "#{options[:login]}@" : ''
target += host
ssh << target
puts ssh.join(' ')
# exec never returns: the Ruby process becomes the ssh client.
Process.exec *ssh
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws/bootstrap.rb
|
<filename>lib/puppet/face/node_aws/bootstrap.rb
require 'puppet/cloudpack'
require 'puppet/face/node_aws'
# Adds the `bootstrap` action to the node_aws face: create + classify + sign,
# all delegated to Puppet::CloudPack.bootstrap.
Puppet::Face.define :node_aws, '0.0.1' do
action :bootstrap do
summary 'Create and initialize an EC2 instance using Puppet.'
description <<-EOT
Creates an instance, classifies it, and signs its certificate. The
classification is currently done using Puppet Dashboard or Puppet
Enterprise's console.
EOT
Puppet::CloudPack.add_bootstrap_options(self)
when_invoked do |options|
Puppet::CloudPack.bootstrap(options)
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws/terminate.rb
|
require 'puppet/cloudpack'
require 'puppet/face/node_aws'
# Adds the `terminate` action to the node_aws face; the work is delegated to
# Puppet::CloudPack.terminate.
Puppet::Face.define :node_aws, '0.0.1' do
action :terminate do
summary 'Terminate an EC2 machine instance.'
description <<-EOT
Terminate the instance identified by <instance_name>.
EOT
arguments '<instance_name>'
Puppet::CloudPack.add_terminate_options(self)
when_invoked do |server, options|
Puppet::CloudPack.terminate(server, options)
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/application/node_gce.rb
|
<filename>lib/puppet/application/node_gce.rb
require 'puppet/application/face_base'
# Empty FaceBase subclass that wires the node_gce face into the `puppet`
# command line (so `puppet node_gce ...` works).
class Puppet::Application::Node_gce < Puppet::Application::FaceBase
end
|
olindata/puppetlabs-cloud_provisioner
|
spec/unit/puppet/cloudpack/progressbar_spec.rb
|
require 'spec_helper'
# Smoke test: the Puppet::CloudPack namespace must expose a ProgressBar
# constant (constants are compared as strings for 1.8/1.9 compatibility).
describe "Puppet::CloudPack.constants" do
subject { Puppet::CloudPack.constants.collect { |k| k.to_s } }
it { should include("ProgressBar") }
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node/init.rb
|
<filename>lib/puppet/face/node/init.rb<gh_stars>1-10
require 'puppet/cloudpack'
# Adds the `init` action to the node face: install + classify + sign, all
# delegated to Puppet::CloudPack.init.
Puppet::Face.define :node, '0.0.1' do
action :init do
summary 'Install Puppet on a node and classify it.'
description <<-EOT
Installs Puppet on an arbitrary node (see "install"), classify it in
Puppet Dashboard or Puppet Enterprise's console (see "classify"), and
automatically sign its certificate request (using the `certificate`
face's `sign` action).
EOT
Puppet::CloudPack.add_init_options(self)
when_invoked do |server, options|
Puppet::CloudPack.init(server, options)
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws.rb
|
require 'puppet/face'
# Base definition of the node_aws face (metadata only); the individual
# actions are added by the files under lib/puppet/face/node_aws/.
Puppet::Face.define(:node_aws, '0.0.1') do
copyright "Puppet Labs", 2011 .. 2013
license "Apache 2 license; see COPYING"
summary "View and manage Amazon AWS EC2 nodes."
description <<-'EOT'
This subcommand provides a command line interface to work with Amazon EC2
machine instances. The goal of these actions is to easily create new
machines, install Puppet onto them, and tear them down when they're no longer
required.
EOT
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/cloudpack/installer.rb
|
require 'erb'
require 'puppet'
require 'puppet/cloudpack'
# Locates and renders the ERB installer-script templates used when installing
# Puppet on a provisioned node. User templates in <confdir>/scripts take
# precedence over the builtin templates shipped next to this file.
module Puppet::CloudPack::Installer
  class << self
    # Renders the named template with ERB. `options` is intentionally kept in
    # scope: templates access it through the `binding` passed to ERB.
    def build_installer_template(name, options = {})
      # binding is a kernel method
      ERB.new(File.read(find_template(name))).result(binding)
    end

    # Absolute path of the builtin template directory (lib/.../scripts).
    def lib_script_dir
      File.join(File.expand_path(File.dirname(__FILE__)), 'scripts')
    end

    # Returns the names (without the '.erb' suffix) of all builtin templates.
    def find_builtin_templates
      templates_dir = lib_script_dir
      templates = []
      Dir.open(templates_dir) do |dir|
        dir.each do |entry|
          next if File.directory?(File.join(templates_dir, entry))
          # Only files that actually end in '.erb' and have a non-empty stem.
          if entry.length > '.erb'.length && entry.end_with?('.erb')
            templates << entry[0...-'.erb'.length]
          end
        end
      end
      templates
    end

    # Resolves a template name to a file path, preferring the user's
    # <confdir>/scripts directory; raises ArgumentError when not found.
    # Note: File.exist? replaces the deprecated File.exists? (removed in
    # Ruby 3.2).
    def find_template(name)
      user_script = File.join(Puppet[:confdir], 'scripts', "#{name}.erb")
      return user_script if File.exist?(user_script)
      lib_script = File.join(lib_script_dir, "#{name}.erb")
      if File.exist?(lib_script)
        lib_script
      else
        raise ArgumentError, "Could not find installer script template for #{name}"
      end
    end
  end
end
|
olindata/puppetlabs-cloud_provisioner
|
acceptance_tests/tests/create_ec2_instance.rb
|
# variables that need to be set at a higher level
# Hard-coded Ubuntu AMI and login used for the EC2 acceptance run; keypair
# and keyfile come from the harness options (@options is set by the runner).
ami='ami-06ad526f'
login='ubuntu'
keypair=@options[:keypair]
keyfile=@options[:keyfile]
# this method can be used to parse out the nodes
# that are described in STDOUT from the list
# action
# Parses the console output of `puppet node_aws list` into a Hash keyed by
# each node's dns_name. Output consists of per-instance chunks beginning
# with "i<instance-id>", followed by one-space-indented "attr: value" lines.
# Instances without a dns_name (already shut down) are omitted.
def parse_node_list(stdout)
  stdout.split("\ni").each_with_object({}) do |chunk, nodes|
    # Splitting on "\ni" strips the leading 'i' from every chunk except the
    # first; restore it so each chunk starts with its instance-id line.
    chunk = "i#{chunk}" unless chunk =~ /^i/
    # Drop the instance-id line; the remaining lines are attributes.
    _id_line, *attr_lines = chunk.split("\n")
    attrs = {}
    attr_lines.each do |line|
      # Remove the single leading indent space, then split "attr: value";
      # lines that don't split into exactly two parts are ignored.
      parts = line.sub(/^ /, '').split(': ')
      attrs[parts[0]] = parts[1] if parts.size == 2
    end
    # dns_names are deallocated once a machine stops, so only nodes with a
    # dns_name are interesting here.
    nodes[attrs['dns_name']] = attrs if attrs['dns_name']
  end
end
# Beaker acceptance flow: create an EC2 instance, list it, install a PE
# agent on it, sign its cert, terminate it, and verify it is gone.
controller = nil
test_name "Test Puppet Cloud Provisioner: create, install, list and destroy"
# find the controller node
hosts.each do |host|
if host['roles'].include? 'controller'
controller = host
end
end
step 'test that we can create ec2 instance(s)'
agent_dnsname=nil
on controller, "puppet node_aws create -i #{ami} --keypair #{keypair} --type t1.micro" do
# set the dnsname as the last line returned
agent_dnsname=stdout.split("\n").last
end
step "test that we can list the created instances: #{agent_dnsname}"
nodes_info=nil
on controller, 'puppet node_aws list' do
nodes_info=parse_node_list(stdout)
assert_equal('running', nodes_info[agent_dnsname]['state'], "Failed to launch an agent EC2 instance")
end
# copy the ec2 private key to the controller
# I would rather use ssh forwarding if I can
scp_to controller, File.expand_path(keyfile), "/root/.ssh/#{File.basename(keyfile)}"
step "test that we can install PE agent on #{agent_dnsname}"
agent_certname=nil
# I would like to be able to see stdout/err even when it fails?
on controller, "puppet node install --keyfile /root/.ssh/#{File.basename(keyfile)} --login #{login} --install-script=gems --server #{master} --debug --verbose --trace #{agent_dnsname}", :acceptable_exit_codes => [ 0 ] do
# The install action prints the new agent's certname as its last line.
agent_certname = stdout.split("\n").last.chomp
end
on controller, "puppet certificate sign #{agent_certname} --ca-location remote --mode agent" do
end
step "test that we can destroy the created instances: #{agent_dnsname}"
on controller, "puppet node_aws terminate #{agent_dnsname}" do
last_line = stdout.split("\n").last
assert_match(/Destroying #{nodes_info[agent_dnsname]['id']} \(#{agent_dnsname}\) ... Done/, last_line, 'Failed to destroy instance')
end
# instead of puppet node list, maybe I should use fog to double check?
on controller, 'puppet node_aws list' do
nodes_list = parse_node_list(stdout)
# The terminated node may still appear briefly while shutting down.
if my_node = nodes_list[agent_dnsname]
assert(my_node['state'] == 'shutting-down' || my_node['state'] == 'terminated', "Node: #{agent_dnsname} was not shut down")
end
# I would like to fail if there are any extra instances?
# but I cant if I share the key
nodes_list.each do |name, my_node|
unless my_node['state'] == 'shutting-down' || my_node['state'] == 'terminated'
puts "Warning: node #{name} is not shut down"
end
# fail if any instances are running
end
end
|
olindata/puppetlabs-cloud_provisioner
|
spec/unit/puppet/face/node_aws/bootstrap_spec.rb
|
<reponame>olindata/puppetlabs-cloud_provisioner
require 'spec_helper'
require 'puppet/cloudpack'
# Option-validation tests for `node_aws bootstrap`: each required option is
# removed in turn and the action must raise before CloudPack.bootstrap runs.
describe Puppet::Face[:node_aws, :current] do
before :each do
@options = {
:image => 'ami-12345',
:keyname => 'some_keypair',
:login => 'ubuntu',
:keyfile => 'file_on_disk.txt',
:installer_payload => 'some.tar.gz',
:installer_answers => 'some.answers',
:node_group => 'webserver',
:type => 'm1.small',
}
end
describe 'option validation' do
before :each do
# Validation failures must abort before the real work is invoked.
Puppet::CloudPack.expects(:bootstrap).never
end
describe '(image)' do
it 'should require an image' do
@options.delete(:image)
expect { subject.bootstrap(@options) }.to raise_error ArgumentError, /required/
end
end
describe '(keypair)' do
it 'should require a keypair name' do
@options.delete(:keyname)
expect { subject.bootstrap(@options) }.to raise_error ArgumentError, /required/
end
end
describe '(login)' do
it 'should require a login' do
@options.delete(:login)
expect { subject.bootstrap(@options) }.to raise_error ArgumentError, /required/
end
end
describe '(keyfile)' do
it 'should require a keyfile' do
@options.delete(:keyfile)
expect { subject.bootstrap(@options) }.to raise_error ArgumentError, /required/
end
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
spec/unit/puppet/face/node/init_spec.rb
|
require 'spec_helper'
require 'puppet/cloudpack'
# Option-validation tests for `node init`: login and keyfile are mandatory
# and the action must raise before CloudPack.init is ever called.
describe Puppet::Face[:node, :current] do
before :each do
@options = {
:login => 'ubuntu',
:keyfile => 'file_on_disk.txt',
:installer_payload => 'some.tar.gz',
:installer_answers => 'some.answers',
:node_group => 'webserver'
}
end
describe 'option validation' do
before :each do
# Validation failures must abort before the real work is invoked.
Puppet::CloudPack.expects(:init).never
end
describe '(login)' do
it 'should require a login' do
@options.delete(:login)
expect { subject.init('server', @options) }.to raise_error ArgumentError, /required/
end
end
describe '(keyfile)' do
it 'should require a keyfile' do
@options.delete(:keyfile)
expect { subject.init('server', @options) }.to raise_error ArgumentError, /required/
end
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/cloudpack/gce.rb
|
require 'puppet/cloudpack'
# This is a container for various horrible procedural code used to set up the
# face actions for the `node_gce` face. It lives here because the design of
# faces -- reinventing the Ruby object model, poorly -- makes it impossible to
# do standard things such as module inclusion, or inheritance, that would
# normally solve these problems in a real OO system.
module Puppet::CloudPack::GCE
module_function
# Dispatcher: `options(face, :project, :zone)` calls add_project(face),
# add_zone(face), etc., so actions can declare shared options compactly.
def options(to, *which)
which.each do |name|
send("add_#{name}", to)
end
end
# Required --project option (the GCE project to operate on).
def add_project(to)
to.option '--project SCP-1125' do
summary 'The project to list instances from'
required
end
end
# --zone option; pass with_default=false to omit the 'us-central1-a'
# default (used by :list, where no zone means "all zones").
def add_zone(to, with_default = true)
to.option '--zone us-central1-a' do
summary 'Limit to instances in the specified zone'
with_default and default_to { 'us-central1-a' }
end
end
# --[no-]wait flag, defaulting to waiting for async operations.
def add_wait(to)
to.option '--[no-]wait' do
summary 'wait for instance creation to complete before returning'
default_to { true }
end
end
# --image (required) plus --image-search, a comma-separated project list
# normalized into an Array by the before_action hook.
def add_image(to)
to.option '--image <name|url>' do
summary 'the disk image name, or full URL, to boot from'
required
end
to.option '--image-search <project, project>' do
summary 'the additional projects to search for images by name'
description <<-EOT
The additional projects to search for images by name.
Google Compute supplies a number of default images, but they live
in their own little world -- distinct projects. This allows you to
set the search path for images specified by name.
It should be a comma separated list of projects.
EOT
default_to do
require 'puppet/google_api'
Puppet::GoogleAPI::StandardImageProjects.join(',')
end
before_action do |action, args, options|
# Fun times, but for consistency to the user...
options[:image_search] = options[:image_search].split(',').map(&:strip)
end
end
end
# --login/--username and --key options; the --key before_action normalizes
# the value to an absolute private-key path.
def add_login(to)
to.option '--login <username>', '-l <username>', '--username <username>' do
summary 'The login user to create on the target system.'
description <<-EOT
The login user to create on the target system. This, along with the
SSH public key, is added to the instance metadata -- which in turn will
cause the Google supplied scripts to create the appropriate account
on the instance.
EOT
end
to.option '--key <keyname | path>' do
summary 'The SSH keypair name or file to install on the created user account.'
description <<-EOT
The SSH keypair name or file to install on the created user account.
The normal value is a keypair name -- relative to ~/.ssh -- that is used
to locate the private and public keys. On the target system, only the
public key is stored. The private key never leaves your machine.
EOT
default_to do
# Prefer gcloud's conventional key name when present.
if Pathname('google_compute_engine').expand_path('~/.ssh').exist?
'google_compute_engine'
else
'id_rsa'
end
end
before_action do |action, args, options|
# First, make sure the pathname is absolute; this turns relative names
# into names relative to the .ssh directory, but preserves an
# absolute path.
key = Pathname(options[:key]).expand_path('~/.ssh')
# Figure out if we got pointed to the public key; we keep this option
# pointing at the private key by convention.
if key.read =~ /PUBLIC KEY|^ssh-/ and key.extname.downcase == '.pub'
key = key.sub_ext('')
end
# Now, verify that we are pointed to a private key file.
unless key.read =~ /PRIVATE KEY/
raise <<EOT
SSH keypair #{options[:key]} does not have private and public key data where I
expect it to be, and I can't figure out how to locate the right parts.
We assume that the private key material is in `.../example-key`, and that the
public key material is in a corresponding `.../example-key.pub` file.
If the option is relative, we assume the base directory is `~/.ssh`.
Please point the key option at the private key file, and put the public key in
place next to it with an additional `.pub` extension.
EOT
end
# Finally, update the option to reflect our changes.
options[:key] = key.to_s
end
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/face/node_aws/list_keynames.rb
|
require 'puppet/face/node_aws'
require 'puppet/cloudpack'
# Adds the `list_keynames` action: returns key-pair hashes from CloudPack
# and renders them as sorted "name (fingerprint)" lines on the console.
Puppet::Face.define :node_aws, '0.0.1' do
action :list_keynames do
summary 'List available AWS EC2 key names.'
description <<-'EOT'
This action lists the available AWS EC2 key names and their fingerprints.
Any key name from this list is a valid argument for the create action's
--keyname option.
EOT
Puppet::CloudPack.add_list_keynames_options(self)
when_invoked do |options|
Puppet::CloudPack.list_keynames(options)
end
when_rendering :console do |value|
value.collect do |key_hash|
"#{key_hash['name']} (#{key_hash['fingerprint']})"
end.sort.join("\n")
end
returns 'Array of attribute hashes containing information about each key pair'
examples <<-'EOT'
List the available key pairs:
$ puppet node_aws list_keynames
cody (58:c6:4f:3e:b5:51:e0:ec:49:55:4e:98:43:8f:28:f3:9a:14:c8:a3)
jeff (fdf8:f53e:61e4::18:47:74:9c:f7:b2:b0:b9:ab:3a:25:d0:28)
matt (4b:8c:8d:a9:e5:88:6a:47:b7:8b:97:c5:77:e7:b7:6f:fd:b9:64:b3)
Get the key pair list as an array of JSON hashes:
$ puppet node_aws list_keynames --render-as json
[{"name":"cody","fingerprint":"58:c6:4f:3e:b5:51:e0:ec:49:55:4e:98:43:8f:28:f3:9a:14:c8:a3"},
{"name":"jeff","fingerprint":"6e:b6:0a:27:5b:67:cd:8b:47:74:9c:f7:b2:b0:b9:ab:3a:25:d0:28"},
{"name":"matt","fingerprint":"4b:8c:8d:a9:e5:88:6a:47:b7:8b:97:c5:77:e7:b7:6f:fd:b9:64:b3"}]
EOT
end
end
|
olindata/puppetlabs-cloud_provisioner
|
lib/puppet/application/node_aws.rb
|
require 'puppet/application/face_base'
# Empty FaceBase subclass that wires the node_aws face into the `puppet`
# command line (so `puppet node_aws ...` works).
class Puppet::Application::Node_aws < Puppet::Application::FaceBase
end
|
olindata/puppetlabs-cloud_provisioner
|
spec/spec_helper.rb
|
# RSpec bootstrap for the project: sets up load paths, enables Fog's mock
# mode, and configures per-example Puppet state reset. Guarded so that
# requiring it twice is harmless.
unless defined?(SPEC_HELPER_IS_LOADED)
SPEC_HELPER_IS_LOADED = 1
dir = File.expand_path(File.dirname(__FILE__))
$LOAD_PATH.unshift("#{dir}/")
$LOAD_PATH.unshift("#{dir}/lib") # a spec-specific test lib dir
$LOAD_PATH.unshift("#{dir}/../lib")
# Don't want puppet getting the command line arguments for rake or autotest
ARGV.clear
require 'puppet'
require 'puppet/face'
require 'puppet/cloudpack'
require 'mocha'
require 'fog'
require 'rspec'
# Use stub credentials and in-memory mocks so no real cloud API is touched.
Fog.credentials_path = File.join(dir, 'fog-stub-configuration')
Fog.mock!
# So everyone else doesn't have to include this base constant.
module PuppetSpec
FIXTURE_DIR = File.join(dir = File.expand_path(File.dirname(__FILE__)), "fixtures") unless defined?(FIXTURE_DIR)
end
module PuppetTest
end
RSpec.configure do |config|
config.mock_with :mocha
config.after :each do
# Reset global Puppet state so examples cannot leak into each other.
Puppet.settings.clear
Puppet::Node::Environment.clear
Puppet::Util::Storage.clear
if defined?($tmpfiles)
$tmpfiles.each do |file|
file = File.expand_path(file)
# Safety net: only remove files under the platform temp locations.
if Puppet.features.posix? and file !~ /^\/tmp/ and file !~ /^\/var\/folders/
puts "Not deleting tmpfile #{file} outside of /tmp or /var/folders"
next
elsif Puppet.features.microsoft_windows?
tempdir = File.expand_path(File.join(Dir::LOCAL_APPDATA, "Temp"))
if file !~ /^#{tempdir}/
puts "Not deleting tmpfile #{file} outside of #{tempdir}"
next
end
end
if FileTest.exist?(file)
system("chmod -R 755 '#{file}'")
system("rm -rf '#{file}'")
end
end
$tmpfiles.clear
end
Puppet::Util::Log.close_all
end
config.before :each do
# these globals are set by Application
$puppet_application_mode = nil
$puppet_application_name = nil
# Set the confdir and vardir to gibberish so that tests
# have to be correctly mocked.
Puppet[:confdir] = "/dev/null"
Puppet[:vardir] = "/dev/null"
# Avoid opening ports to the outside world
Puppet.settings[:bindaddress] = "127.0.0.1"
@logs = []
Puppet::Util::Log.newdestination(Puppet::Test::LogCollector.new(@logs))
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
spec/unit/puppet/face/node_aws/create_spec.rb
|
<filename>spec/unit/puppet/face/node_aws/create_spec.rb
require 'spec_helper'
require 'puppet/cloudpack'
# Option-validation tests for `node_aws create`, run against Fog's AWS mock:
# tag parsing, required options, image/keyname/region validation, and
# security-group name-to-ID resolution.
describe Puppet::Face[:node_aws, :current] do
before :all do
# Seed the us-east-1 mock with the image, keypair and security groups the
# validation code will look up.
data = Fog::Compute::AWS::Mock.data['us-east-1'][Fog.credentials[:aws_access_key_id]]
data[:images]['ami-12345'] = { 'imageId' => 'ami-12345' }
data[:key_pairs]['some_keypair'] = { 'keyName' => 'some_keypair' }
data[:security_groups]['A'] = { 'groupName' => 'A', 'groupId' => 'sg-deadbeef' }
data[:security_groups]['D'] = { 'groupName' => 'D', 'groupId' => 'sg-deafd0d0' }
end
let :options do
{
:image => 'ami-12345',
:type => 'm1.small',
:keyname => 'some_keypair',
:region => 'us-east-1',
}
end
describe 'option validation' do
before :each do
# Validation failures must abort before CloudPack.create is invoked.
Puppet::CloudPack.expects(:create).never
end
describe '(tags)' do
it 'should exit on improper value' do
options[:instance_tags] = 'tag1=value2,tag2=value,=broken'
expect { subject.create(options) }.to raise_error ArgumentError, /could not parse/i
end
it 'should produce a hash correctly' do
# Only the first '=' separates key from value; later '=' stay in the value.
options[:instance_tags] = 'tag1=value1,tag2=value2,tag3=value3.1=value3.2'
Puppet::CloudPack.expects(:create).with() do |opts|
opts[:instance_tags].should == {
'tag1' => 'value1',
'tag2' => 'value2',
'tag3' => 'value3.1=value3.2'
}
end
subject.create(options)
end
end
describe '(type)' do
it 'should require a type' do
options.delete(:type)
expect { subject.create(options) }.to raise_error ArgumentError, /required/
end
end
describe '(image)' do
it 'should require an image' do
options.delete(:image)
expect { subject.create(options) }.to raise_error ArgumentError, /required/
end
it 'should validate the image name' do
options[:image] = 'RejectedImageName'
expect { subject.create(options) }.to raise_error ArgumentError,
/unrecognized.*: #{options[:image]}/i
end
end
describe '(keyname)' do
it 'should require a keyname' do
options.delete(:keyname)
expect { subject.create(options) }.to raise_error ArgumentError, /required/
end
it 'should validate the image name' do
options[:keyname] = 'RejectedKeypairName'
expect { subject.create(options) }.to raise_error ArgumentError,
/unrecognized.*: #{options[:keyname]}/i
end
end
describe '(region)' do
it "should set the region to us-east-1 if no region is supplied" do
# create a connection before we start fiddling with the options
connection = Puppet::CloudPack.create_connection(options)
options.delete(:region)
Puppet::CloudPack.expects(:create)
# Note that we need to provide the return value so that
# no exceptions are thrown from the code which calls
# the create_connection method and expects it to return
# something reasonable (i.e. non-nil)
Puppet::CloudPack.stubs(:create_connection).with() do |opts|
opts[:region].should == 'us-east-1'
end.returns(connection)
subject.create(options)
end
it 'should validate the region' do
options[:region] = 'mars-east-100'
expect { subject.create(options) }.to raise_error ArgumentError, /Unknown region/
end
end
describe '(security-group)' do
it 'should call group_option_before_action' do
options[:security_group] = %w[ A B C D E ].join(File::PATH_SEPARATOR)
Puppet::CloudPack.expects(:create)
# This makes sure the before_action calls the group_option_before_action
# correctly with the options we've specified.
Puppet::CloudPack.stubs(:group_option_before_action).with() do |opts|
opts[:security_group].should == options[:security_group]
end
subject.create(options)
end
it 'should validate all group names' do
options[:security_group] = %w[ A B C ]
# note that the group 'A' is mocked to be known to AWS in the 'before :all' block
# at the start of this file
expect { subject.create(options) }.to raise_error ArgumentError,
/unrecognized.*: #{Regexp.quote(%w[ B C ].join(', '))}/i
end
it 'should produce an array of security group IDs correctly' do
options[:security_group] = %w[ sg-deadbeef D ].join(File::PATH_SEPARATOR)
Puppet::CloudPack.expects(:create).with() do |opts|
opts[:security_group].should == %w[ sg-deadbeef sg-deafd0d0 ]
end
subject.create(options)
end
end
end
end
|
olindata/puppetlabs-cloud_provisioner
|
spec/unit/puppet/face/node/install_spec.rb
|
<reponame>olindata/puppetlabs-cloud_provisioner<filename>spec/unit/puppet/face/node/install_spec.rb<gh_stars>1-10
require 'spec_helper'
require 'puppet/cloudpack'
require 'tempfile'
# Specs for the `node install` face action: option validation (login,
# keyfile, install-script, facts, installer payload/answers, puppet-version)
# and the sudo/non-sudo SSH execution paths.
describe Puppet::Face[:node, :current] do
  # we need to keep references to the Tempfile's we create
  # otherwise the created temporary files may get deleted
  # too early when the corresponding Tempfile objects are
  # garbage collected
  let :tempfiles do {} end
  let :basic_options do
    tempfiles[:keyfile] = Tempfile.new(['file_on_disk', '.txt'])
    {
      :login => 'ubuntu',
      :keyfile => tempfiles[:keyfile].path,
      :facts => 'fact1=value1,fact2=value2,fact3=value3.1=value3.2',
    }
  end
  let :options do
    tempfiles[:installer_payload] = Tempfile.new(['some', '.tar.gz'])
    tempfiles[:installer_answers] = Tempfile.new(['some', '.answers'])
    basic_options.merge({
      :installer_payload => tempfiles[:installer_payload].path,
      :installer_answers => tempfiles[:installer_answers].path,
    })
  end
  before :each do
    # Several code paths require an SSH agent; fake its socket.
    ENV['SSH_AUTH_SOCK'] = '/tmp/foo.socket'
  end
  after :each do
    tempfiles.each_value do |tempfile|
      tempfile.close!()
    end
  end
  describe 'option validation' do
    before :each do
      # Validation failures must happen before CloudPack.install is reached.
      Puppet::CloudPack.expects(:install).never
    end
    describe '(login)' do
      it 'should require a login' do
        options.delete(:login)
        expect { subject.install('server', options) }.to raise_error ArgumentError, /required.* login/
      end
    end
    describe '(keyfile)' do
      it 'should require a keyfile' do
        no_keyfile_options = Hash[options]
        no_keyfile_options.delete(:keyfile)
        expect { subject.install('server', no_keyfile_options) }.to raise_error ArgumentError, /required.* keyfile/
      end
      it 'should validate the keyfile name for existence' do
        opts = options.update :keyfile => '/dev/null/nonexistent.file'
        expect { subject.install('server', opts) }.to raise_error ArgumentError, /could not find.*nonexistent\.file'/i
      end
      it 'should validate the keyfile name for readability' do
        File.chmod 0300, options[:keyfile]
        expect { subject.install('server', options) }.to raise_error ArgumentError, /could not read.* file/i
      end
    end
    describe '(install-script)' do
      it %q(should default to 'puppet-enterprise') do
        Puppet::CloudPack.expects(:install).with do |server, options|
          options[:install_script] == 'puppet-enterprise'
        end
        subject.install('server', options)
      end
      it 'should be possible to set install script' do
        Puppet::CloudPack.expects(:install).with do |server, options|
          options[:install_script] == 'puppet-community'
        end
        subject.install('server', options.merge(:install_script => 'puppet-community'))
      end
      it 'should validate that the installer script template is available for the specified script' do
        # create a temporary file in the directory where the builtin installer script templates live
        install_script_tempfile = Tempfile.new(['puppet-nonexistent', '.erb'], Puppet::CloudPack::Installer.lib_script_dir)
        # note its name
        nonexistent_install_script = File.basename(install_script_tempfile.path, '.erb')
        # and delete it, so that it is really noexistent
        install_script_tempfile.close!()
        expect { subject.install('server', basic_options.merge(:install_script => nonexistent_install_script)) }.to raise_error ArgumentError, /could not find .*installer script template/i
      end
    end
    describe '(facts)' do
      # Expected parse of the basic_options :facts string; note the last
      # fact's value itself contains an '='.
      let(:facts_hash) do { 'fact1' => 'value1', 'fact2' => 'value2', 'fact3' => 'value3.1=value3.2' }; end
      it 'should produce a hash correctly' do
        Puppet::CloudPack.expects(:install).with do |server,options|
          options[:facts] == facts_hash
        end
        subject.install('server', options)
      end
      it 'should exit on improper value' do
        options[:facts] = 'fact1=value1,fact2=val,ue2,fact3=value3.1=value3.2'
        expect { subject.install('server', options) }.to raise_error ArgumentError, /could not parse.* facts/i
      end
    end
    describe '(installer-payload)' do
      it 'should validate the installer payload for existence' do
        opts = options.update :installer_payload => '/dev/null/nonexistent.file'
        expect { subject.install('server', opts) }.to raise_error ArgumentError, /could not find.*nonexistent\.file/i
      end
      ['http://foo:8080', 'https://bar', 'ftp://baz'].each do |url|
        it "should not validate the installer payload for file existance when it is a url: #{url}" do
          Puppet::CloudPack.expects(:install)
          opts = options.update :installer_payload => url
          subject.install('server', opts)
        end
      end
      it 'should detect invalid urls' do
        opts = options.update :installer_payload => 'invalid path'
        expect { subject.install('server', opts) }.to raise_error ArgumentError, /invalid input.* installer-payload/i
      end
      it 'should validate the installer payload for readability' do
        File.chmod 0300, options[:installer_payload]
        expect { subject.install('server', options) }.to raise_error ArgumentError, /could not read.* file/i
      end
      it 'should warn if the payload does not have either tgz or gz extension' do
        tempfiles[:installer_payload] = Tempfile.new(['foo', '.tar'])
        tempfiles[:installer_answers] = Tempfile.new(['some', '.answers'])
        options = basic_options.merge({
          :installer_payload => tempfiles[:installer_payload].path,
          :installer_answers => tempfiles[:installer_answers].path,
        })
        # NOTE(review): "intaller-payload" mirrors a typo in the production
        # warning message; keep this expectation in sync with the code under test.
        Puppet.expects(:warning).with("Option: intaller-payload expects a .tgz or .gz file")
        Puppet::CloudPack.expects(:install)
        subject.install('server', options)
      end
      it %q(should require the installer payload if the install script is 'puppet-enterprise') do
        no_payload_options = options.merge(:install_script => 'puppet-enterprise')
        no_payload_options.delete(:installer_payload)
        expect { subject.install('server', no_payload_options) }.to raise_error ArgumentError, /must specify.* installer payload/i
      end
    end
    describe '(installer-answers)' do
      it 'should validate the answers file for existence' do
        opts = options.update :installer_answers => '/dev/null/nonexistent.file'
        expect { subject.install('server', opts) }.to raise_error ArgumentError, /could not find.*nonexistent\.file/i
      end
      it 'should validate the answers file for readability' do
        File.chmod 0300, options[:installer_answers]
        expect { subject.install('server', options) }.to raise_error ArgumentError, /could not read.* file/i
      end
      it %q(should require an answers file if the install script is 'puppet-enterprise') do
        expect { subject.install('server', basic_options.merge(:install_script => 'puppet-enterprise')) }.to raise_error ArgumentError, /must specify.* answers file/i
      end
      it %q(should require an answers file if the install script starts with 'puppet-enterprise-') do
        expect { subject.install('server', basic_options.merge(:install_script => 'puppet-enterprise-http')) }.to raise_error ArgumentError, /must specify.* answers file/i
      end
    end
    describe '(puppet-version)' do
      ['2.7.x', 'master', '2.6.9'].each do |version|
        it "should accept valid value #{version}" do
          opts = options.update :puppet_version => version
          opts[:puppet_version].should == version
          Puppet::CloudPack.expects(:install)
          subject.install('server', options)
        end
      end
      it 'should fail when invalid versions are specified' do
        # NOTE(review): "Invaid" mirrors a typo in the production error message.
        opts = options.update :puppet_version => '172.16.31.10'
        expect { subject.install('server', opts) }.to raise_error(ArgumentError, /Invaid Puppet version/)
      end
    end
  end
  describe 'when installing as root and non-root' do
    let(:ssh_remote_execute_results) do { :exit_code => 0, :stdout => 'stdout' }; end
    let(:root_options) do { :login => 'root', :keyfile => 'agent', :install_script => 'puppet-community' }; end
    let(:user_options) do { :login => 'ubuntu', :keyfile => 'agent', :install_script => 'puppet-community' }; end
    it 'should use sudo when not root' do
      # We (may) need a state machine here
      installation = states('installation').starts_as('unstarted')
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).when(installation.is('unstarted')).then(installation.is('date_checked'))
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).when(installation.is('date_checked')).then(installation.is('installed'))
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).when(installation.is('installed')).then(installation.is('finished'))
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).with("server", user_options[:login], 'sudo puppet agent --configprint certname', nil).when(installation.is('finished'))
      subject.install('server', user_options)
    end
    it 'should not use sudo when root' do
      # We (may) need a state machine here
      installation = states('installation').starts_as('unstarted')
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).when(installation.is('unstarted')).then(installation.is('date_checked'))
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).when(installation.is('date_checked')).then(installation.is('installed'))
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).when(installation.is('installed')).then(installation.is('finished'))
      Puppet::CloudPack.expects(:ssh_remote_execute).returns(ssh_remote_execute_results).with("server", root_options[:login], 'puppet agent --configprint certname', nil).when(installation.is('finished'))
      subject.install('server', root_options)
    end
  end
  describe 'valid options' do
    describe 'keyfile option' do
      let(:user_options) do { :login => 'ubuntu', :keyfile => 'agent', :install_script => 'puppet-community' }; end
      let(:ssh_remote_execute_results) do { :exit_code => 0, :stdout => 'stdout' }; end
      it 'should support using keys from an agent' do
        Puppet::CloudPack.expects(:install).once.with() do |server, received_options|
          received_options[:keyfile] == user_options[:keyfile]
        end
        subject.install('server', user_options)
      end
      it 'should not pass the string agent to ssh' do
        # :keyfile => 'agent' is a sentinel meaning "use the SSH agent";
        # ssh should therefore receive a nil keyfile.
        Puppet::CloudPack.expects(:ssh_remote_execute).times(4).with() do |server, login, command, keyfile|
          keyfile.should be_nil
        end.returns(ssh_remote_execute_results)
        subject.install('server', user_options)
      end
      it 'should raise an error if SSH_AUTH_SOCK is not set' do
        ENV['SSH_AUTH_SOCK'] = nil
        expect { subject.install('server', user_options) }.to raise_error ArgumentError, /SSH_AUTH_SOCK/
      end
    end
    describe 'puppetagent-certname option' do
      let(:user_options) do { :login => 'ubuntu', :keyfile => 'agent', :install_script => 'puppet-community', :puppetagent_certname => 'jeffmaster' }; end
      it 'should support setting the agent certificate name' do
        Puppet::CloudPack.expects(:install).once.with() do |server, received_options|
          received_options[:puppetagent_certname] == user_options[:puppetagent_certname]
        end
        subject.install('server', user_options)
      end
    end
  end
end
|
diegotoral/cache-money
|
lib/cash/mock.rb
|
<filename>lib/cash/mock.rb
module Cash
  # In-memory stand-in for a memcached client, used in tests. Entries live in
  # the underlying hash; values are stored marshaled unless +raw+ is given,
  # and the write operations answer with memcached-protocol strings
  # ("STORED\r\n" / "NOT_STORED\r\n") so callers can treat it like the real
  # client.
  class Mock < HashWithIndifferentAccess
    attr_accessor :servers

    # Returns {key => unmarshaled value} for every requested key present.
    def get_multi(keys)
      slice(*keys).inject({}) do |found, (key, marshaled)|
        found.merge(key => Marshal.load(marshaled))
      end
    end

    def set(key, value, ttl = 0, raw = false)
      self[key] = marshal(value, raw)
    end

    # Returns the raw stored string when +raw+, otherwise the unmarshaled
    # value, or nil when the key is absent.
    def get(key, raw = false)
      return self[key] if raw
      has_key?(key) ? Marshal.load(self[key]) : nil
    end

    def incr(key, amount = 1)
      bump(key, amount)
    end

    def decr(key, amount = 1)
      bump(key, -amount)
    end

    # Stores the value only when the key is not already present, mirroring
    # memcached's ADD command and its protocol-level replies.
    def add(key, value, ttl = 0, raw = false)
      return "NOT_STORED\r\n" if has_key?(key)
      self[key] = marshal(value, raw)
      "STORED\r\n"
    end

    def append(key, value)
      set(key, "#{get(key, true)}#{value}", nil, true)
    end

    def namespace
      nil
    end

    def flush_all
      clear
    end

    def stats
      {}
    end

    def reset_runtime
      [0, Hash.new(0)]
    end

    private

    # Shared implementation of incr/decr; returns nil when the key is absent,
    # matching memcached semantics.
    def bump(key, delta)
      return unless has_key?(key)
      self[key] = (self[key].to_i + delta).to_s
      self[key].to_i
    end

    def marshal(value, raw)
      raw ? value.to_s : Marshal.dump(value)
    end

    def unmarshal(marshaled_obj)
      Marshal.load(marshaled_obj)
    end

    # NOTE(review): #marshal takes two arguments, so this would raise
    # ArgumentError if ever called — appears to be dead code; kept verbatim
    # for behavioural fidelity.
    def deep_clone(obj)
      unmarshal(marshal(obj))
    end
  end
end
|
diegotoral/cache-money
|
lib/cash/query/abstract.rb
|
<filename>lib/cash/query/abstract.rb
module Cash
  module Query
    # Base class for cacheable ActiveRecord query strategies. Subclasses
    # (Select, Calculation, ...) provide #miss and #uncacheable; this class
    # decides whether a find can be served from the cache, builds cache keys
    # from the query's equality conditions, and merges cache hits with
    # database misses.
    class Abstract
      delegate :with_exclusive_scope, :get, :table_name, :indices, :find_from_ids_without_cache, :cache_key, :columns_hash, :to => :@active_record

      # Convenience: build a query object and run it in one call.
      def self.perform(*args)
        new(*args).perform
      end

      def initialize(active_record, options1, options2)
        @active_record, @options1, @options2 = active_record, options1, options2 || {}
      end

      # Runs the query. If the combination of finder options is cacheable,
      # serve it from the cache (filling in misses from the database);
      # otherwise fall back to the subclass's #uncacheable path.
      def perform(find_options = {}, get_options = {})
        if cache_config = cacheable?(@options1, @options2, find_options)
          cache_keys, index = cache_keys(cache_config[0]), cache_config[1]
          misses, missed_keys, objects = hit_or_miss(cache_keys, index, get_options)
          format_results(cache_keys, choose_deserialized_objects_if_possible(missed_keys, cache_keys, misses, objects))
        else
          uncacheable
        end
      end

      DESC = /DESC/i

      # [column_name, :asc/:desc] parsed from the :order option; defaults to
      # ['id', :asc] when no order was given.
      def order
        @order ||= begin
          if order_sql = @options1[:order] || @options2[:order]
            matched, table_name, column_name, direction = *(ORDER.match(order_sql))
            [column_name, direction =~ DESC ? :desc : :asc]
          else
            ['id', :asc]
          end
        end
      end

      def limit
        @limit ||= @options1[:limit] || @options2[:limit]
      end

      def offset
        @offset ||= @options1[:offset] || @options2[:offset] || 0
      end

      def calculation?
        false
      end

      private

      # Returns [attribute_value_pairs, index] when every options hash is
      # safe to cache and the conditions map onto a declared index; nil/false
      # otherwise.
      def cacheable?(*optionss)
        optionss.each { |options| return unless safe_options_for_cache?(options) }
        partial_indices = optionss.collect { |options| attribute_value_pairs_for_conditions(options[:conditions]) }
        return if partial_indices.include?(nil)
        attribute_value_pairs = partial_indices.sum.sort { |x, y| x[0] <=> y[0] }
        if index = indexed_on?(attribute_value_pairs.collect { |pair| pair[0] })
          if index.matches?(self)
            [attribute_value_pairs, index]
          end
        end
      end

      # Fetches the cache keys, generating misses from the database via the
      # subclass's #miss hook. Returns [misses, missed_keys, objects].
      #
      # BUG FIX: the block parameter was previously also named `missed_keys`,
      # shadowing the outer local. Under Ruby >= 1.9 block parameters are
      # always block-local, so the outer `missed_keys` was never assigned and
      # this method always returned nil for it, breaking the
      # `missed_keys == cache_keys` comparison in #perform. Using a distinct
      # block parameter name with an explicit assignment behaves correctly on
      # all Ruby versions.
      def hit_or_miss(cache_keys, index, options)
        misses, missed_keys = nil, nil
        objects = @active_record.get(cache_keys, options.merge(:ttl => index.ttl)) do |keys_not_found|
          missed_keys = keys_not_found
          misses = miss(missed_keys, @options1.merge(:limit => index.window))
          serialize_objects(index, misses)
        end
        [misses, missed_keys, objects]
      end

      def cache_keys(attribute_value_pairs)
        attribute_value_pairs.flatten.join('/')
      end

      # Only :conditions/:readonly/:limit/:offset/:order are cache-safe, and
      # :readonly finds are never cached.
      def safe_options_for_cache?(options)
        return false unless options.kind_of?(Hash)
        options.except(:conditions, :readonly, :limit, :offset, :order).values.compact.empty? && !options[:readonly]
      end

      # Normalizes hash/string/array conditions into [[column, value], ...];
      # returns nil for condition forms that cannot be parsed.
      def attribute_value_pairs_for_conditions(conditions)
        case conditions
        when Hash
          conditions.to_a.collect { |key, value| [key.to_s, value] }
        when String
          parse_indices_from_condition(conditions)
        when Array
          parse_indices_from_condition(*conditions)
        when NilClass
          []
        end
      end

      AND = /\s+AND\s+/i
      TABLE_AND_COLUMN = /(?:(?:`|")?(\w+)(?:`|")?\.)?(?:`|")?(\w+)(?:`|")?/ # Matches: `users`.id, `users`.`id`, users.id, id
      VALUE = /'?(\d+|\?|(?:(?:[^']|'')*))'?/ # Matches: 123, ?, '123', '12''3'
      KEY_EQ_VALUE = /^\(?#{TABLE_AND_COLUMN}\s+=\s+#{VALUE}\)?$/ # Matches: KEY = VALUE, (KEY = VALUE)
      ORDER = /^#{TABLE_AND_COLUMN}\s*(ASC|DESC)?$/i # Matches: COLUMN ASC, COLUMN DESC, COLUMN

      # Parses `col = val AND col2 = val2` SQL into [[column, value], ...];
      # bails out with nil as soon as any fragment is not a plain equality,
      # since such conditions are not cacheable.
      def parse_indices_from_condition(conditions = '', *values)
        values = values.dup
        conditions.split(AND).inject([]) do |indices, condition|
          matched, table_name, column_name, sql_value = *(KEY_EQ_VALUE.match(condition))
          if matched
            value = sql_value == '?' ? values.shift : columns_hash[column_name].type_cast(sql_value)
            indices << [column_name, value]
          else
            return nil
          end
        end
      end

      def indexed_on?(attributes)
        indices.detect { |index| index == attributes }
      end
      alias_method :index_for, :indexed_on?

      def format_results(cache_keys, objects)
        return objects if objects.blank?
        objects = convert_to_array(cache_keys, objects)
        objects = apply_limits_and_offsets(objects, @options1)
        deserialize_objects(objects)
      end

      # When every key missed, the freshly-loaded misses are already
      # deserialized AR objects — prefer them over the serialized cache blobs.
      def choose_deserialized_objects_if_possible(missed_keys, cache_keys, misses, objects)
        missed_keys == cache_keys ? misses : objects
      end

      def serialize_objects(index, objects)
        Array(objects).collect { |missed| index.serialize_object(missed) }
      end

      def convert_to_array(cache_keys, object)
        if object.kind_of?(Hash)
          cache_keys.collect { |key| object[cache_key(key)] }.flatten.compact
        else
          Array(object)
        end
      end

      def apply_limits_and_offsets(results, options)
        results.slice((options[:offset] || 0), (options[:limit] || results.length))
      end

      # Primary-key hits come back as full AR objects; secondary indices
      # store ids, which are resolved here through the id index.
      def deserialize_objects(objects)
        if objects.first.kind_of?(ActiveRecord::Base)
          objects
        else
          cache_keys = objects.collect { |id| "id/#{id}" }
          objects = get(cache_keys, &method(:find_from_keys))
          convert_to_array(cache_keys, objects)
        end
      end

      # NOTE(review): keys appear to be "<namespace>/id/<id>", hence the [2]
      # — confirm against #cache_key if the key format ever changes.
      def find_from_keys(*missing_keys)
        missing_ids = Array(missing_keys).flatten.collect { |key| key.split('/')[2].to_i }
        find_from_ids_without_cache(missing_ids, {})
      end
    end
  end
end
|
diegotoral/cache-money
|
spec/cash/lock_spec.rb
|
<filename>spec/cash/lock_spec.rb
require File.join(File.dirname(__FILE__), '..', 'spec_helper')
module Cash
  # Specs for Cash::Lock, the memcached-backed mutex used to serialize cache
  # writes. $lock and $memcache are created in spec_helper; locks live under
  # "lock/<key>" entries.
  describe Lock do
    describe '#synchronize' do
      it "yields the block" do
        block_was_called = false
        $lock.synchronize('lock_key') do
          block_was_called = true
        end
        block_was_called.should == true
      end
      it "acquires the specified lock before the block is run" do
        $memcache.get("lock/lock_key").should == nil
        $lock.synchronize('lock_key') do
          $memcache.get("lock/lock_key").should_not == nil
        end
      end
      it "releases the lock after the block is run" do
        $memcache.get("lock/lock_key").should == nil
        $lock.synchronize('lock_key') {}
        $memcache.get("lock/lock_key").should == nil
      end
      it "releases the lock even if the block raises" do
        $memcache.get("lock/lock_key").should == nil
        $lock.synchronize('lock_key') { raise } rescue nil
        $memcache.get("lock/lock_key").should == nil
      end
      specify "does not block on recursive lock acquisition" do
        $lock.synchronize('lock_key') do
          lambda { $lock.synchronize('lock_key') {} }.should_not raise_error
        end
      end
    end
    describe '#acquire_lock' do
      specify "creates a lock at a given cache key" do
        $memcache.get("lock/lock_key").should == nil
        $lock.acquire_lock("lock_key")
        $memcache.get("lock/lock_key").should_not == nil
      end
      specify "retries specified number of times" do
        $lock.acquire_lock('lock_key')
        as_another_process do
          # memcached ADD answers NOT_STORED while another pid holds the lock.
          mock($memcache).add("lock/lock_key", Process.pid, timeout = 10) { "NOT_STORED\r\n" }.times(3)
          stub($lock).exponential_sleep
          lambda { $lock.acquire_lock('lock_key', timeout, 3) }.should raise_error
        end
      end
      specify "correctly sets timeout on memcache entries" do
        mock($memcache).add('lock/lock_key', Process.pid, timeout = 10) { "STORED\r\n" }
        $lock.acquire_lock('lock_key', timeout)
      end
      specify "prevents two processes from acquiring the same lock at the same time" do
        $lock.acquire_lock('lock_key')
        as_another_process do
          lambda { $lock.acquire_lock('lock_key') }.should raise_error
        end
      end
      # Simulates a second process by stubbing Process.pid to a different
      # value for the duration of the block (lock ownership is tracked by pid).
      def as_another_process
        current_pid = Process.pid
        stub(Process).pid { current_pid + 1 }
        yield
      end
    end
    describe '#release_lock' do
      specify "deletes the lock for a given cache key" do
        $memcache.get("lock/lock_key").should == nil
        $lock.acquire_lock("lock_key")
        $memcache.get("lock/lock_key").should_not == nil
        $lock.release_lock("lock_key")
        $memcache.get("lock/lock_key").should == nil
      end
    end
  end
end
|
diegotoral/cache-money
|
lib/cash/query/calculation.rb
|
module Cash
  module Query
    # Cached strategy for ActiveRecord calculations (e.g. COUNT). Results are
    # cached raw (as strings) under the base key plus "/<operation>", and only
    # whole-table (:all column) calculations are cacheable.
    class Calculation < Abstract
      delegate :calculate_without_cache, :incr, :to => :@active_record

      def initialize(active_record, operation, column, options1, options2)
        super(active_record, options1, options2)
        @operation = operation
        @column = column
      end

      # Calculations are stored/fetched raw — no marshaling round-trip.
      def perform
        super({}, :raw => true)
      end

      def calculation?
        true
      end

      protected

      def miss(_attribute_value_pairs, _find_options)
        calculate
      end

      def uncacheable
        calculate
      end

      # The cached value is a string; callers expect an integer.
      def format_results(_cache_keys, objects)
        objects.to_i
      end

      def serialize_objects(_index, objects)
        objects.to_s
      end

      # Note: order matters — on success this must return super's
      # [attribute_value_pairs, index] result, not a bare boolean.
      def cacheable?(*all_options)
        @column == :all && super(*all_options)
      end

      def cache_keys(attribute_value_pairs)
        "#{super(attribute_value_pairs)}/#{@operation}"
      end

      private

      # Both the miss and uncacheable paths fall through to the database.
      def calculate
        calculate_without_cache(@operation, @column, @options1)
      end
    end
  end
end
|
diegotoral/cache-money
|
lib/cash/transactional.rb
|
module Cash
  # Decorator around a memcached client that adds nestable transactions:
  # inside #transaction, writes are staged in a Buffered layer and flushed to
  # the underlying cache only if the block completes without raising. All
  # other cache calls are delegated to the current (possibly buffered) layer.
  class Transactional
    # The underlying (non-buffered) memcached client.
    attr_reader :memcache

    def initialize(memcache, lock)
      # @cache is the currently-active layer; it starts as the raw client and
      # is swapped for a Buffered wrapper inside transactions.
      @memcache = @cache = memcache
      @lock = lock
    end

    # Runs the block inside a buffered transaction. The buffer is flushed
    # only on success; the transaction layer is always popped, and any
    # exception is re-raised.
    def transaction
      exception_was_raised = false
      begin_transaction
      result = yield
    rescue Object
      exception_was_raised = true
      raise
    ensure
      begin
        @cache.flush unless exception_was_raised
      ensure
        end_transaction
      end
    end

    # Delegate every cache operation (get/set/add/...) to the active layer.
    def method_missing(method, *args, &block)
      @cache.send(method, *args, &block)
    end

    # Bug fix: Object#respond_to? takes an optional second argument
    # (include_all/include_private); the previous one-argument override
    # raised ArgumentError for callers passing it. Accept and forward it.
    def respond_to?(method, include_all = false)
      @cache.respond_to?(method, include_all)
    end

    private

    def begin_transaction
      @cache = Buffered.push(@cache, @lock)
    end

    def end_transaction
      @cache = @cache.pop
    end
  end
end
|
diegotoral/cache-money
|
lib/cash/request.rb
|
module Cash
  # Shared mutable hash used as a request-local cache store.
  # NOTE(review): no usage is visible in this file — confirm against callers
  # whether/where it is cleared between requests.
  Request = {}
end
|
diegotoral/cache-money
|
spec/cash/write_through_spec.rb
|
require File.join(File.dirname(__FILE__), '..', 'spec_helper')
module Cash
  # Specs for write-through caching: creates/updates/destroys must keep the
  # index caches ("title/...", "id/...", combined keys and "/count" entries)
  # consistent. Story/Epic/Oral and their indices are defined in spec_helper.
  describe WriteThrough do
    describe 'ClassMethods' do
      describe 'after create' do
        it "inserts all indexed attributes into the cache" do
          story = Story.create!(:title => "I am delicious")
          Story.get("title/#{story.title}").should == [story.id]
          Story.get("id/#{story.id}").should == [story]
        end
        describe 'multiple objects' do
          it "inserts multiple objects into the same cache key" do
            story1 = Story.create!(:title => "I am delicious")
            story2 = Story.create!(:title => "I am delicious")
            Story.get("title/#{story1.title}").should == [story1.id, story2.id]
          end
          describe 'when the cache has been cleared after some objects were created' do
            before do
              @story1 = Story.create!(:title => "I am delicious")
              $memcache.flush_all
              @story2 = Story.create!(:title => "I am delicious")
            end
            it 'inserts legacy objects into the cache' do
              Story.get("title/#{@story1.title}").should == [@story1.id, @story2.id]
            end
            it 'initializes the count to account for the legacy objects' do
              Story.get("title/#{@story1.title}/count", :raw => true).should =~ /2/
            end
          end
        end
        it "does not write through the cache on non-indexed attributes" do
          story = Story.create!(:title => "Story 1", :subtitle => "Subtitle")
          Story.get("subtitle/#{story.subtitle}").should == nil
        end
        it "indexes on combinations of attributes" do
          story = Story.create!(:title => "Sam")
          Story.get("id/#{story.id}/title/#{story.title}").should == [story.id]
        end
        it "does not cache associations" do
          story = Story.new(:title => 'I am lugubrious')
          story.characters.build(:name => 'How am I holy?')
          story.save!
          Story.get("id/#{story.id}").first.characters.loaded?.should_not be
        end
        it 'increments the count' do
          story = Story.create!(:title => "Sam")
          Story.get("title/#{story.title}/count", :raw => true).should =~ /1/
          story = Story.create!(:title => "Sam")
          Story.get("title/#{story.title}/count", :raw => true).should =~ /2/
        end
        describe 'when the value is nil' do
          it "does not write through the cache on indexed attributes" do
            story = Story.create!(:title => nil)
            Story.get("title/").should == nil
          end
        end
      end
      describe 'after update' do
        it "overwrites the primary cache" do
          story = Story.create!(:title => "I am delicious")
          Story.get(cache_key = "id/#{story.id}").first.title.should == "I am delicious"
          story.update_attributes(:title => "I am fabulous")
          Story.get(cache_key).first.title.should == "I am fabulous"
        end
        it "populates empty caches" do
          story = Story.create!(:title => "I am delicious")
          $memcache.flush_all
          story.update_attributes(:title => "I am fabulous")
          Story.get("title/#{story.title}").should == [story.id]
        end
        it "removes from the affected index caches on update" do
          story = Story.create!(:title => "I am delicious")
          Story.get(cache_key = "title/#{story.title}").should == [story.id]
          story.update_attributes(:title => "I am fabulous")
          Story.get(cache_key).should == []
        end
        it 'increments/decrements the counts of affected indices' do
          story = Story.create!(:title => original_title = "I am delicious")
          story.update_attributes(:title => new_title = "I am fabulous")
          Story.get("title/#{original_title}/count", :raw => true).should =~ /0/
          Story.get("title/#{new_title}/count", :raw => true).should =~ /1/
        end
      end
      describe 'after destroy' do
        it "removes from the primary cache" do
          story = Story.create!(:title => "I am delicious")
          Story.get(cache_key = "id/#{story.id}").should == [story]
          story.destroy
          Story.get(cache_key).should == []
        end
        it "removes from the the cache on keys matching the original values of attributes" do
          story = Story.create!(:title => "I am delicious")
          Story.get(cache_key = "title/#{story.title}").should == [story.id]
          story.title = "I am not delicious"
          story.destroy
          Story.get(cache_key).should == []
        end
        it 'decrements the count' do
          story = Story.create!(:title => "I am delicious")
          story.destroy
          Story.get("title/#{story.title}/count", :raw => true).should =~ /0/
        end
        describe 'when there are multiple items in the index' do
          it "only removes one item from the affected indices, not all of them" do
            story1 = Story.create!(:title => "I am delicious")
            story2 = Story.create!(:title => "I am delicious")
            Story.get(cache_key = "title/#{story1.title}").should == [story1.id, story2.id]
            story1.destroy
            Story.get(cache_key).should == [story2.id]
          end
        end
        describe 'when the object is a new record' do
          it 'does nothing' do
            story1 = Story.new
            mock(Story).set.never
            story1.destroy
          end
        end
        describe 'when the cache is not yet populated' do
          it "populates the cache with data" do
            story1 = Story.create!(:title => "I am delicious")
            story2 = Story.create!(:title => "I am delicious")
            $memcache.flush_all
            Story.get(cache_key = "title/#{story1.title}").should == nil
            story1.destroy
            Story.get(cache_key).should == [story2.id]
          end
        end
        describe 'when the value is nil' do
          it "does not delete through the cache on indexed attributes when the value is nil" do
            story = Story.create!(:title => nil)
            story.destroy
            Story.get("title/").should == nil
          end
        end
      end
      describe 'InstanceMethods' do
        describe '#expire_caches' do
          it 'deletes the index' do
            story = Story.create!(:title => "I am delicious")
            Story.get(cache_key = "id/#{story.id}").should == [story]
            story.expire_caches
            Story.get(cache_key).should be_nil
          end
        end
      end
    end
    describe "Locking" do
      it "acquires and releases locks, in order, for all indices to be written" do
        pending
        story = Story.create!(:title => original_title = "original title")
        story.title = tentative_title = "tentative title"
        keys = ["id/#{story.id}", "title/#{original_title}", "title/#{story.title}", "id/#{story.id}/title/#{original_title}", "id/#{story.id}/title/#{tentative_title}"]
        locks_should_be_acquired_and_released_in_order($lock, keys)
        story.save!
      end
      it "acquires and releases locks on destroy" do
        pending
        story = Story.create!(:title => "title")
        keys = ["id/#{story.id}", "title/#{story.title}", "id/#{story.id}/title/#{story.title}"]
        locks_should_be_acquired_and_released_in_order($lock, keys)
        story.destroy
      end
      # Sets ordered mock expectations: every key must be lock-acquired (in
      # sorted order) and then released.
      def locks_should_be_acquired_and_released_in_order(lock, keys)
        mock = keys.sort!.inject(mock = mock($lock)) do |mock, key|
          mock.acquire_lock.with(Story.cache_key(key)).then
        end
        keys.inject(mock) do |mock, key|
          mock.release_lock.with(Story.cache_key(key)).then
        end
      end
    end
    describe "Single Table Inheritence" do
      describe 'A subclass' do
        it "writes to indices of all superclasses" do
          oral = Oral.create!(:title => 'title')
          Story.get("title/#{oral.title}").should == [oral.id]
          Epic.get("title/#{oral.title}").should == [oral.id]
          Oral.get("title/#{oral.title}").should == [oral.id]
        end
        describe 'when one ancestor has its own indices' do
          it "it only populates those indices for that ancestor" do
            oral = Oral.create!(:subtitle => 'subtitle')
            Story.get("subtitle/#{oral.subtitle}").should be_nil
            Epic.get("subtitle/#{oral.subtitle}").should be_nil
            Oral.get("subtitle/#{oral.subtitle}").should == [oral.id]
          end
        end
      end
    end
  end
end
|
diegotoral/cache-money
|
lib/cash/query/select.rb
|
module Cash
  module Query
    # Cached strategy for row-returning finds. Both the cache-miss and the
    # uncacheable paths delegate to the model's uncached finder.
    class Select < Abstract
      delegate :find_every_without_cache, :to => :@active_record

      protected

      # Loads the rows for keys that were not in the cache.
      def miss(_attribute_value_pairs, miss_options)
        find_every_without_cache(miss_options)
      end

      # Fallback when the finder options cannot be served from the cache.
      def uncacheable
        find_every_without_cache(@options1)
      end
    end
  end
end
|
diegotoral/cache-money
|
spec/spec_helper.rb
|
<reponame>diegotoral/cache-money<filename>spec/spec_helper.rb
# Test bootstrap for the cache-money suite: loads the gem, wires up a real
# memcached client ($memcache) and lock ($lock), rebuilds the schema, and
# defines the Story/Epic/Oral/Character model hierarchy with their indices.
dir = File.dirname(__FILE__)
$LOAD_PATH.unshift "#{dir}/../lib"
require 'rubygems'
require 'spec'
require 'pp'
require 'cache_money'
require 'memcache'
require File.join(dir, '../config/environment')
Spec::Runner.configure do |config|
  config.mock_with :rr
  config.before :suite do
    load File.join(dir, "../db/schema.rb")
    # NOTE(review): this local `config` deliberately (?) shadows the
    # configure block's parameter for the rest of this block.
    config = YAML.load(IO.read((File.expand_path(File.dirname(__FILE__) + "/../config/memcache.yml"))))['test']
    $memcache = MemCache.new(config)
    $memcache.servers = config['servers']
    $lock = Cash::Lock.new($memcache)
  end
  config.before :each do
    # Start every example from an empty cache and empty tables.
    $memcache.flush_all
    Story.delete_all
    Character.delete_all
  end
  config.before :suite do
    ActiveRecord::Base.class_eval do
      is_cached :repository => Cash::Transactional.new($memcache, $lock)
    end
    # Models are built dynamically so they pick up the is_cached repository
    # configured above. Story < Epic < Oral exercise STI index inheritance.
    Character = Class.new(ActiveRecord::Base)
    Story = Class.new(ActiveRecord::Base)
    Story.has_many :characters
    Story.class_eval do
      index :title
      index [:id, :title]
      index :published
    end
    Epic = Class.new(Story)
    Oral = Class.new(Epic)
    Character.class_eval do
      index [:name, :story_id]
      index [:id, :story_id]
      index [:id, :name, :story_id]
    end
    Oral.class_eval do
      index :subtitle
    end
  end
end
|
dannyflatiron/mechanics-of-migrations-online-web-pt-110419
|
artist.rb
|
class Artist<ActiveRecord::Base
  # Artist subclasses ActiveRecord::Base (class inheritance — not
  # "extending"); that inheritance is what gives the model the ActiveRecord
  # persistence API, with attributes derived from the backing table.
end
|
poise/poise-tls-remote-file
|
test/cookbook/recipes/default.rb
|
#
# Copyright 2017, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Integration fixture: serve /test via nginx over plain HTTP (port 80),
# mutual-TLS (443, client cert required) and server-only TLS (444), then
# exercise tls_remote_file against each endpoint.
package 'nginx'
directory '/test'
# TLS material shipped with the test cookbook.
cookbook_file '/test/ca.crt'
cookbook_file '/test/server.key'
cookbook_file '/test/server.crt'
cookbook_file '/test/client.key'
cookbook_file '/test/client.crt'
cookbook_file '/test/client.pem'
# The file each download below is expected to retrieve.
file '/test/target' do
  content "Hello world\n"
end
file '/test/nginx.conf' do
  content <<-EOH
daemon off;
master_process off;
worker_processes auto;
events { }
error_log /test/error.log;
http {
server {
listen 80;
server_name localhost;
location / {
root /test;
}
}
server {
listen 443;
ssl on;
server_name localhost;
ssl_certificate /test/server.crt;
ssl_certificate_key /test/server.key;
ssl_client_certificate /test/client.crt;
ssl_verify_client on;
location / {
root /test;
}
}
server {
listen 444;
ssl on;
server_name localhost;
ssl_certificate /test/server.crt;
ssl_certificate_key /test/server.key;
location / {
root /test;
}
}
}
EOH
end
poise_service 'nginx' do
  command 'nginx -c /test/nginx.conf'
  provider :dummy
end
# Mutual TLS with separate cert/key files.
tls_remote_file '/output' do
  source 'https://localhost/target'
  client_cert '/test/client.crt'
  client_key '/test/client.key'
  ca '/test/ca.crt'
end
# Mutual TLS with a combined cert+key PEM.
tls_remote_file '/output2' do
  source 'https://localhost/target'
  client_cert '/test/client.pem'
  ca '/test/ca.crt'
end
# Test with no client key, just normal HTTPS as fallback.
tls_remote_file '/output3' do
  source 'https://localhost:444/target'
  ca '/test/ca.crt'
end
# And even more fallback, just plain HTTP.
tls_remote_file '/output4' do
  source 'http://localhost/target'
end
# HTTP even with a CA cert.
tls_remote_file '/output5' do
  source 'http://localhost/target'
  ca '/test/ca.crt'
end
# Make sure I didn't break normal remote_file.
remote_file '/output6' do
  source 'http://localhost/target'
end
|
poise/poise-tls-remote-file
|
test/spec/resources/poise_tls_remote_file_spec.rb
|
<reponame>poise/poise-tls-remote-file
#
# Copyright 2017, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'spec_helper'
# Unit tests for the tls_remote_file resource. All network and filesystem
# interaction is stubbed: Net::HTTP is replaced by rspec doubles, and reads of
# the /test/* cert paths are redirected to fixture files in the test cookbook.
describe PoiseTlsRemoteFile::Resources::PoiseTlsRemoteFile do
  step_into(:tls_remote_file)
  # Download target for each example; cleaned up in the after hook.
  let(:tempfile) { Tempfile.new('chefout') }
  let(:stub_http_response) { double('Net::HTTPResponse', http_version: '1.1', code: '200', msg: 'OK') }
  let(:stub_cert_store) { double('OpenSSL::X509::Store') }
  let(:stub_http) { double('Net::HTTP', proxy_address: nil, cert_store: stub_cert_store) }
  before { override_attributes['test_tempfile'] = tempfile.path }
  after { tempfile.close! }
  before do
    # Stub file loading.
    allow(IO).to receive(:read).and_call_original
    allow(IO).to receive(:read).with('/test/client.crt') { IO.read(File.expand_path('../../../cookbook/files/client.crt', __FILE__)) }
    allow(IO).to receive(:read).with('/test/client.key') { IO.read(File.expand_path('../../../cookbook/files/client.key', __FILE__)) }
    allow(IO).to receive(:read).with('/test/client.pem') { IO.read(File.expand_path('../../../cookbook/files/client.pem', __FILE__)) }
    allow(IO).to receive(:read).with('/test/ca.crt') { IO.read(File.expand_path('../../../cookbook/files/ca.crt', __FILE__)) }
    # Stub core HTTP stuffs.
    allow(Net::HTTP).to receive(:new).and_return(stub_http)
    allow(stub_http).to receive(:proxy_port=).with(nil)
    allow(stub_http).to receive(:use_ssl=).with(true)
    allow(stub_http).to receive(:verify_mode=).with(1)
    allow(stub_http).to receive(:cert_store=)
    allow(stub_http).to receive(:read_timeout=).with(300)
    allow(stub_http).to receive(:open_timeout=).with(300)
    allow(stub_http).to receive(:request).and_yield(stub_http_response)
    allow(stub_cert_store).to receive(:set_default_paths)
    allow(stub_http_response).to receive(:error!)
    allow(stub_http_response).to receive(:each)
    # Attributes.
    override_attributes['poise-tls-remote-file'] = {}
  end
  # Default recipe under test; individual contexts override it.
  recipe do
    tls_remote_file node['test_tempfile'] do
      source 'https://example.com/'
    end
  end

  # Expected fingerprints of the fixture certs/keys.
  # NOTE(review): constants assigned inside a describe block are defined at
  # top level and leak across spec files; `let` would be safer.
  CA_FINGERPRINT = 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:47:9e:fd:cd:53:e0:99:58'
  CLIENT_FINGERPRINT = '84:9f:57:30:e7:74:d1:fd:d5:a2:d7:72:9c:02:a0:3c'
  SERVER_FINGERPRINT = 'c9:cd:24:86:65:13:33:19:11:0f:0d:06:6f:63:3f:dd'

  # The helpers below assert which cert/key gets installed on the HTTP client.
  # NOTE(review): `fingerprint` on OpenSSL keys is presumably provided by a
  # spec helper — core OpenSSL defines no such method; confirm where it lives.
  def expect_cert(fingerprint)
    expect(stub_http).to receive(:cert=) do |cert|
      expect(cert.public_key.fingerprint).to eq fingerprint
    end
  end
  def expect_key(fingerprint)
    expect(stub_http).to receive(:key=) do |key|
      expect(key.fingerprint).to eq fingerprint
    end
  end
  def expect_add_cert(fingerprint)
    expect(stub_cert_store).to receive(:add_cert) do |cert|
      expect(cert.public_key.fingerprint).to eq fingerprint
    end
  end

  context 'with client_cert' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        client_cert '/test/client.crt'
      end
    end
    it do
      expect_cert(CLIENT_FINGERPRINT)
      run_chef
    end
  end # /context with client_cert

  context 'with client_key' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        client_key '/test/client.key'
      end
    end
    it do
      expect_key(CLIENT_FINGERPRINT)
      run_chef
    end
  end # /context with client_key

  context 'with both client_cert and client_key' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        client_cert '/test/client.crt'
        client_key '/test/client.key'
      end
    end
    it do
      expect_cert(CLIENT_FINGERPRINT)
      expect_key(CLIENT_FINGERPRINT)
      run_chef
    end
  end # /context with both client_cert and client_key

  context 'with ca string' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        ca '/test/ca.crt'
      end
    end
    it do
      expect_add_cert(CA_FINGERPRINT)
      run_chef
    end
  end # /context with ca string

  context 'with ca array' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        ca %w{/test/ca.crt /test/client.crt}
      end
    end
    it do
      expect_add_cert(CA_FINGERPRINT)
      expect_add_cert(CLIENT_FINGERPRINT)
      run_chef
    end
  end # /context with ca array

  # The literal contexts pass PEM data inline rather than a path.
  # NOTE(review): the heredoc bodies below were redacted to `<KEY>` in this
  # copy of the source; the real fixture PEM blocks must be restored for
  # these examples to work.
  context 'with a literal client_cert' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        client_cert <<-EOH
-----<KEY>
EOH
      end
    end
    it do
      expect_cert(SERVER_FINGERPRINT)
      run_chef
    end
  end # /context with a literal client_cert

  context 'with a literal client_key' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        client_key <<-EOH
-----BEGIN RSA PRIVATE KEY-----
<KEY>
EOH
      end
    end
    it do
      expect_key(SERVER_FINGERPRINT)
      run_chef
    end
  end # /context with a literal client_key

  context 'with a literal ca' do
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'https://example.com/'
        ca <<-EOH
-----<KEY>ERTIFICATE-----
EOH
      end
    end
    it do
      expect_add_cert(SERVER_FINGERPRINT)
      run_chef
    end
  end # /context with a literal ca

  # Node-attribute defaults flow through the lazy property defaults.
  context 'with node["poise-tls-remote-file"]["client_cert"]' do
    before { override_attributes['poise-tls-remote-file']['client_cert'] = '/test/client.crt' }
    it do
      expect_cert(CLIENT_FINGERPRINT)
      run_chef
    end
  end # /context with node["poise-tls-remote-file"]["client_cert"]

  context 'with node["poise-tls-remote-file"]["client_key"]' do
    before { override_attributes['poise-tls-remote-file']['client_key'] = '/test/client.key' }
    it do
      expect_key(CLIENT_FINGERPRINT)
      run_chef
    end
  end # /context with node["poise-tls-remote-file"]["client_key"]

  context 'with node["poise-tls-remote-file"]["ca"]' do
    before { override_attributes['poise-tls-remote-file']['ca'] = '/test/ca.crt' }
    it do
      expect_add_cert(CA_FINGERPRINT)
      run_chef
    end
  end # /context with node["poise-tls-remote-file"]["ca"]

  context 'with no additional properties' do
    it { expect { run_chef }.to_not raise_error }
  end # /context with no additional properties

  # Plain HTTP: Chef sets no cert_store, so the CA-injection path must no-op.
  context 'with an HTTP URL' do
    before do
      allow(stub_http).to receive(:cert_store).and_return(nil)
    end
    recipe do
      tls_remote_file node['test_tempfile'] do
        source 'http://example.com/'
      end
    end
    it { expect { run_chef }.to_not raise_error }
    context 'with a CA cert' do
      recipe do
        tls_remote_file node['test_tempfile'] do
          source 'http://example.com/'
          ca '/test/ca.crt'
        end
      end
      it { expect { run_chef }.to_not raise_error }
    end # /context with a CA cert
  end # /context with an HTTP URL
end
|
poise/poise-tls-remote-file
|
chef/attributes/default.rb
|
#
# Copyright 2017, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Node-attribute defaults consumed by the tls_remote_file resource's lazy
# property defaults. Each value may be an absolute path (which the resource
# reads from disk) or a literal PEM string; nil disables the option.

# Default client_cert for all tls_remote_file resources.
default['poise-tls-remote-file']['client_cert'] = nil
# Default client_key for all tls_remote_file resources.
default['poise-tls-remote-file']['client_key'] = nil
# Default ca for all tls_remote_file resources.
default['poise-tls-remote-file']['ca'] = nil
|
poise/poise-tls-remote-file
|
lib/poise_tls_remote_file/resources/poise_tls_remote_file.rb
|
<gh_stars>0
#
# Copyright 2017, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'chef/resource/remote_file'
require 'chef/provider/remote_file'
module PoiseTlsRemoteFile
module Resources
# (see PoiseTlsRemoteFile::Resource)
# @since 1.0.0
module PoiseTlsRemoteFile
# A `tls_remote_file` resource to do something.
#
# @provides tls_remote_file
# @action run
# @example
# tls_remote_file '/path/to/file' do
# client_cert '/etc/ssl/client.crt'
# client_key '/etc/ssl/private/client.key'
# end
class Resource < Chef::Resource::RemoteFile
  resource_name(:tls_remote_file)

  def initialize(*args)
    super
    # Only force our provider when Chef's base initialize already populated
    # @provider (older Chef resource/provider resolution). On newer Chef the
    # `provides(:tls_remote_file)` on the Provider class handles this.
    @provider = PoiseTlsRemoteFile::Provider if defined?(@provider)
  end

  # Each property accepts a filesystem path or a literal PEM string (see
  # #maybe_read_file); defaults come from node attributes, evaluated lazily.
  property(:client_cert, kind_of: [String, NilClass], default: lazy { default_client_cert })
  property(:client_key, kind_of: [String, NilClass], default: lazy { default_client_key })
  property(:ca, kind_of: [String, Array, NilClass], default: lazy { default_ca })

  # Parsed client certificate, or nil when no client_cert is set.
  def client_cert_obj
    OpenSSL::X509::Certificate.new(maybe_read_file(client_cert)) if client_cert
  end

  # Parsed client key. Falls back to looking for a key inside the
  # client_cert material (combined PEM files); nil when neither has one.
  def client_key_obj
    if client_key
      OpenSSL::PKey::RSA.new(maybe_read_file(client_key))
    elsif client_cert
      begin
        OpenSSL::PKey::RSA.new(maybe_read_file(client_cert))
      rescue OpenSSL::PKey::RSAError
        # It didn't have a key in it, oh well.
        nil
      end
    end
  end

  # Parsed CA certificates. `ca` may be nil, a single value, or an array;
  # nil elements map to nil entries, which the fetcher skips.
  def ca_objs
    Array(ca).map do |path|
      OpenSSL::X509::Certificate.new(maybe_read_file(path)) if path
    end
  end

  private

  # Node-attribute defaults for the three properties above.
  def default_client_cert
    node['poise-tls-remote-file']['client_cert']
  end

  def default_client_key
    node['poise-tls-remote-file']['client_key']
  end

  def default_ca
    node['poise-tls-remote-file']['ca']
  end

  # Returns file contents when the value looks like an absolute path
  # (leading "/" or a Windows drive letter), otherwise the value itself.
  def maybe_read_file(path)
    if path =~ /\A(\/|\w:)/
      IO.read(path)
    else
      # Looks like a literal value.
      path
    end
  end
end
# Provider for `tls_remote_file`.
#
# @see Resource
# @provides tls_remote_file
class Provider < Chef::Provider::RemoteFile
  provides(:tls_remote_file)

  def initialize(*args)
    super
    # Swap in our Content class so downloads go through the TLS-aware fetcher.
    @content_class = PoiseTlsRemoteFile::Content
  end
end
# Content class for `tls_remote_file`.
#
# @see Resource
class Content < Chef::Provider::RemoteFile::Content
  # Route downloads through the TLS-aware fetcher instead of Chef's default
  # HTTP fetcher so client cert/key and extra CAs are applied.
  def grab_file_from_uri(uri)
    fetcher = PoiseTlsRemoteFile::Fetcher.new(uri, @new_resource, @current_resource)
    fetcher.fetch
  end
end
# Fetcher class for `tls_remote_file`.
#
# @see Resource
class Fetcher < Chef::Provider::RemoteFile::HTTP
  # Runs the normal remote_file fetch, but temporarily patches
  # Chef::HTTP::Simple.new so every HTTP client created during the fetch gets
  # this resource's client cert/key and extra CA certs installed.
  #
  # NOTE(review): this is a process-global patch (not thread-safe) — any
  # concurrent Chef::HTTP::Simple.new during the fetch is also affected. The
  # ensure block removes the override afterwards.
  def fetch
    # Captured by the define_method closures below.
    client_cert = new_resource.client_cert_obj
    client_key = new_resource.client_key_obj
    ca = new_resource.ca_objs
    begin
      Chef::HTTP::Simple.singleton_class.send(:define_method, :new) do |*args|
        super(*args).tap do |http_simple|
          http_simple.singleton_class.prepend(Module.new {
            define_method(:http_client) do |*inner_args|
              super(*inner_args).tap do |client|
                client.http_client.cert = client_cert if client_cert
                client.http_client.key = client_key if client_key
                # cert_store is nil if this is not an HTTPS URL.
                ca.each {|cert| client.http_client.cert_store.add_cert(cert) if cert } if client.http_client.cert_store
              end
            end
          })
        end
      end
      super
    ensure
      # Always restore the original Chef::HTTP::Simple.new.
      Chef::HTTP::Simple.singleton_class.send(:remove_method, :new)
    end
  end
end
end
end
end
|
MaxLap/winever
|
lib/winever/cron_entry.rb
|
module Winever
  # One line of Winever's internal cron-like output, pipe-delimited as:
  #   schedule|task_folder|task_name|working_directory|parameters
  class CronEntry
    attr_accessor :cron_time, :task_folder, :task_name, :working_directory, :parameters, :cron_line

    # Parses every non-empty line of +cron_output+ into CronEntry objects.
    # Entries Winever cannot handle are filtered out unless +include_invalid+
    # is true.
    def self.from_cron_output(cron_output, include_invalid = false)
      lines = cron_output.split("\n").reject(&:empty?)
      entries = lines.map { |line| new(line) }
      include_invalid ? entries : entries.select(&:valid?)
    end

    def initialize(cron_line)
      @cron_line = cron_line
      @cron_parts = cron_line.split("|", 5)
      schedule_string = @cron_parts[0]
      @task_folder, @task_name, @working_directory, @parameters = @cron_parts[1..-1]
      @cron_time = Winever::CronTime.new(schedule_string)
    end

    # Windows Task Scheduler trigger hashes for this entry's schedule.
    def triggers
      cron_time.triggers
    end

    def valid?
      !invalid_reason
    end

    # Human-readable reason this entry cannot be handled, or nil when valid.
    def invalid_reason
      return "Doesn't match the Winever format" if @cron_parts.length != 5
      return "Doesn't have a task_name specified" unless @task_name && !@task_name.empty?
      return "Problem with schedule: #{@cron_time.unsupported_reason}" unless @cron_time.supported?
      nil
    end
  end
end
|
MaxLap/winever
|
lib/winever/cron_time.rb
|
<reponame>MaxLap/winever<filename>lib/winever/cron_time.rb
module Winever
  # Wraps the schedule portion of a cron line (minute hour day month dow)
  # and translates it into Windows Task Scheduler trigger hashes.
  class CronTime
    attr_accessor :string, :parts, :minute, :hour, :day, :month, :dow

    # Splits the raw schedule string into its five fields. Missing fields are
    # padded with empty strings; extra fields are kept (and later make the
    # schedule unsupported).
    def initialize(cron_time_string)
      @string = cron_time_string
      @parts = Array.new(5, '')
      fields = cron_time_string.split(/ +/)
      @parts[0...fields.size] = fields
      @minute, @hour, @day, @month, @dow = @parts
    end

    # Returns an array of win32-taskscheduler trigger hashes, or [] when the
    # schedule cannot be expressed. Only a single daily specific time is
    # supported for now; win32-taskscheduler also seems to only support one
    # trigger per task.
    def triggers
      return [] unless supported?
      today = Date.today
      [{
        :start_year => today.year,
        :start_month => today.month,
        :start_day => today.day,
        :start_hour => hour.to_i,
        :start_minute => minute.to_i,
        :trigger_type => Win32::TaskScheduler::TASK_TIME_TRIGGER_DAILY
      }]
    end

    def supported?
      !unsupported_reason
    end

    # Returns nil when the schedule is supported, otherwise a human-readable
    # explanation of the limitation.
    def unsupported_reason
      present = parts.compact.reject(&:empty?)
      return "Need 5 parts delimited by spaces" unless present.length == 5
      return "Only '*' is supported for day, month and day or week parts" unless [day, month, dow].all? { |field| field == '*' }
      return "Only single number is supported for minute and hour parts" unless [minute, hour].all? { |field| field =~ /^\d+$/ }
      nil
    end
  end
end
|
MaxLap/winever
|
lib/winever/command_line.rb
|
module Winever
  # Parses command-line options for the `winever` executable and dispatches
  # to the task manager (update/clear) or prints the converted schedule.
  class CommandLine
    # Entry point used by the executable: builds the options hash from ARGV
    # via OptionParser, then runs.
    def self.run_from_command_line_options
      require 'optparse'
      options = {}
      OptionParser.new do |opts|
        opts.banner = "Usage: whenever [options]"
        opts.on('-i', '--update [identifier]', 'Default: full path to schedule.rb file') do |identifier|
          options[:update] = true
          options[:identifier] = identifier if identifier
        end
        opts.on('-c', '--clear [identifier]') do |identifier|
          options[:clear] = true
          options[:identifier] = identifier if identifier
        end
        opts.on('-s', '--set [variables]', 'Example: --set \'environment=staging&path=/my/sweet/path\'') do |set|
          options[:set] = set if set
        end
        opts.on('-f', '--load-file [schedule file]', 'Default: config/schedule.rb') do |file|
          options[:file] = file if file
        end
        opts.on('-k', '--cut [lines]', 'Cut lines from the top of the cronfile') do |lines|
          options[:cut] = lines.to_i if lines
        end
        opts.on('-v', '--version') { puts "Winever v#{Winever::VERSION}"; exit(0) }
      end.parse!
      self.execute(options)
    end

    def self.execute options={}
      new(options).run
    end

    # Validates the options, warning and exiting the process on bad input.
    def initialize options={}
      @options = options
      @options[:file] ||= 'config/schedule.rb'
      @options[:cut] ||= 0
      @options[:identifier] ||= default_identifier
      # File.exist? — File.exists? was deprecated and removed in Ruby 3.2.
      unless File.exist?(@options[:file])
        warn("[fail] Can't find file: #{@options[:file]}")
        exit(1)
      end
      if [@options[:update], @options[:clear]].compact.length > 1
        warn("[fail] Can only update or clear. Choose one.")
        exit(1)
      end
      # BUG FIX: the previous check used /[0-9]*/, which matches EVERY string
      # (zero digits match at position 0), so the negative-cut guard never
      # fired. The anchored \A\d+\z rejects negatives and non-numeric input.
      unless @options[:cut].to_s =~ /\A\d+\z/
        warn("[fail] Can't cut negative lines from the crontab #{options[:cut]}")
        exit(1)
      end
      @options[:cut] = @options[:cut].to_i
    end

    # Dispatches on the selected action; with no action, prints the schedule
    # converted to the internal cron-winever syntax and exits.
    def run
      if @options[:update]
        Winever::TaskManager.update_tasks(@options)
      elsif @options[:clear]
        Winever::TaskManager.clear_tasks(@options)
      else
        puts Winever::WheneverInterface.cron(@options)
        puts "## [message] Above is your schedule file converted to cron-winever syntax; your crontab file /scheduled tasks were not updated."
        puts "## [message] Run `winever --help' for more options."
        exit(0)
      end
    end

    # Default task identifier: the absolute path of the schedule file.
    def default_identifier
      File.expand_path(@options[:file])
    end
  end
end
|
MaxLap/winever
|
lib/winever/whenever_interface.rb
|
module Winever
  # Bridges Whenever's schedule evaluation with Winever's internal cron-like
  # format. Holds module-level state (the run_from_winever flag and the list
  # of extra task names to remove), so it is not thread-safe.
  module WheneverInterface
    # True while raw_cron is evaluating the schedule through Whenever; the
    # JobList#winever? helper reads this.
    def self.run_from_winever?
      @run_from_winever || false
    end

    # Queues additional task names (flattened) for removal on the next update.
    def self.remove_existing_tasks *names
      @existing_tasks_to_remove ||= []
      @existing_tasks_to_remove.concat(names.flatten)
    end

    def self.existing_tasks_to_remove
      @existing_tasks_to_remove ||= []
    end

    # The output of whenever with the custom job_types and job_template.
    # Prepends setup_schedule.rb so our job_types/job_template override the
    # defaults, then evaluates via Whenever.cron with the flag set.
    # NOTE(review): mutates the caller's options hash, and the
    # @run_from_winever flag is not reset in an ensure block — if
    # Whenever.cron raises, the flag stays true for subsequent calls.
    def self.raw_cron options={}
      options[:file] ||= 'config/schedule.rb'
      options[:cut] ||= 0
      options[:identifier] ||= File.expand_path(options[:file])
      schedule = if options[:string]
        options[:string]
      elsif options[:file]
        File.read(options[:file])
      end
      # Prepending out own setup for the schedule to override the existing job_types and job_template.
      options[:string] = File.read(File.dirname(__FILE__)+"/setup_schedule.rb") + "\n" + schedule
      @run_from_winever = true
      output = Whenever.cron(options)
      @run_from_winever = false
      output
    end

    # Array of CronEntry containing only the entry that we support.
    def self.valid_cron_entries options={}
      Winever::CronEntry.from_cron_output(raw_cron(options))
    end

    # Array of CronEntry including entries Winever cannot handle.
    def self.all_cron_entries options={}
      Winever::CronEntry.from_cron_output(raw_cron(options), true)
    end

    # Content of a printable cron in internal Winever format. Also displays entry that are not handled and why.
    def self.cron options={}
      entries = all_cron_entries(options)
      valid_entries = entries.select(&:valid?)
      invalid_entries = entries.reject(&:valid?)
      output = "# Valid tasks for Winever in internal format:\n"
      if !valid_entries.empty?
        output << valid_entries.map(&:cron_line).join("\n\n")
      else
        output << "No valid entries"
      end
      output << "\n\n"
      if !invalid_entries.empty?
        output << "\n# Invalid entries for Winever in internal format:\n"
        invalid_entries.each do |invalid_entry|
          output << "# #{invalid_entry.invalid_reason}\n"
          output << "#{invalid_entry.cron_line}\n\n"
        end
      end
      if !existing_tasks_to_remove.empty?
        # NOTE(review): the display threshold 15 is duplicated below as an
        # interpolated literal; extract a constant if it ever changes.
        if existing_tasks_to_remove.size <= 15
          output << "\n# Additionnal task names that will be removed if they exist:\n"
          existing_tasks_to_remove.each do |path|
            output << "# - #{path}\n"
          end
        else
          output << "\n# Additionnal task names that will be removed if they exist:\n"
          output << "# (More than #{15} task names, not displaying.)\n"
        end
        output << "\n"
      end
      output
    end
  end
end
|
MaxLap/winever
|
lib/winever/task_manager.rb
|
<reponame>MaxLap/winever
module Winever
  # Creates, updates and removes Windows scheduled tasks from Winever cron
  # entries via win32-taskscheduler. Registering tasks requires the current
  # Windows user's password.
  class TaskManager
    # Memoized check that win32/taskscheduler can be loaded (i.e. we are on
    # Windows with the gem installed).
    def self.has_task_scheduler?
      return @has_task_scheduler unless @has_task_scheduler.nil?
      begin
        require 'win32/taskscheduler'
        @has_task_scheduler = true
      rescue LoadError => e
        @has_task_scheduler = false
      end
      @has_task_scheduler
    end

    def has_task_scheduler?
      self.class.has_task_scheduler?
    end

    # Removes every task managed under this identifier (plus queued names).
    def self.clear_tasks options={}
      new(options).clear_tasks_except
    end

    def self.update_tasks options={}
      new(options).update_tasks
    end

    def initialize options={}
      if !has_task_scheduler?
        raise "Cannot use win32/taskscheduler on this system. Are you on windows?"
      end
      @options = options
    end

    # Resolves the Windows account password, in priority order: explicit
    # :password option, WINEVER_PASSWORD env var, previously entered value,
    # then an interactive highline prompt that re-asks until the password
    # validates or the user leaves it blank. Returns nil when declined.
    def password
      return @options[:password] if @options.has_key?(:password)
      return ENV['WINEVER_PASSWORD'] if ENV['WINEVER_PASSWORD'] && !ENV['WINEVER_PASSWORD'].empty?
      return @password.empty? ? nil : @password if @password
      require 'highline/import'
      prompt = <<-PRMP.gsub(/^ +/, '')
To setup tasks, the password of the current user account is needed.
Enter the password of the current windows user (or leave blank to avoid doing the changes):
PRMP
      pw = ask(prompt){|q| q.echo = false}
      while pw && !pw.empty? && !validate_password(pw)
        prompt = <<-PRMP.gsub(/^ +/, '')
Invalid password entered.
Enter the password of the current windows user (or leave blank to avoid doing the changes):
PRMP
        pw = ask(prompt){|q| q.echo = false}
      end
      #TODO get the password somehow.
      # require File.expand_path('../../extensions/password', __FILE__)
      # password = Password.ask("Enter password for current user account of the machine to setup tasks: ")
      # validate_password(password)
      @password = pw
      pw.empty? ? nil : pw
    end

    # Creates one scheduled task per valid cron entry; returns their names.
    # Returns nil (not an array) when no password was provided.
    def create_tasks
      return if self.password.nil?
      cron_entries = Winever::WheneverInterface.valid_cron_entries(@options)
      created_task_names = []
      cron_entries.each do |cron_entry|
        created_task_names << create_task(cron_entry)
      end
      created_task_names
    end

    # Creates/updates all tasks, then removes stale ones. Returns false when
    # no password was provided.
    def update_tasks
      return false if self.password.nil?
      task_names = create_tasks
      clear_tasks_except(task_names)
      true
    end

    # Deletes every task whose name ends with ".<identifier>" plus any
    # queued extra names, except those listed in keep_tasks.
    def clear_tasks_except keep_tasks=[]
      ts = Win32::TaskScheduler.new
      task_names = ts.tasks.select{|tn| tn.end_with?('.' + identifier)}
      task_names.concat(Winever::WheneverInterface::existing_tasks_to_remove)
      task_names = task_names.reject{|tn| keep_tasks.include?(tn)}
      task_names.each{|tn| ts.delete(tn) if ts.exists?(tn)}
    end

    # Registers a single task running `cmd /C <parameters>` with the entry's
    # first trigger. Returns the generated task name, or nil w/o password.
    def create_task cron_entry
      return if self.password.nil?
      task_name = generate_task_name(cron_entry.task_name)
      # Replacing the /dev/null by NUL
      parameters = cron_entry.parameters.gsub(/([\s'"])\/dev\/null([\s'"])/, '\1NUL\2')
      pw = password
      trigger = cron_entry.triggers.first
      work_directory = cron_entry.working_directory
      ts = Win32::TaskScheduler.new(nil, nil, cron_entry.task_folder, true)
      begin
        ts.password = pw
        ts.new_work_item(task_name, trigger)
        ts.application_name = 'cmd'
        ts.parameters = '/C ' + parameters
        ts.working_directory = work_directory
        ts.activate(task_name)
      rescue
        raise 'Failed at setting the task up. It might have been partially created/updated. This most likely means a bad password was entered.'
      end
      task_name
    end

    # Suffix task names with the identifier so clear_tasks_except can find
    # the tasks owned by this schedule file.
    def generate_task_name task_name
      "#{task_name}.#{identifier}"
    end

    def identifier
      # Removing the characters blocked by the windows file system. The single quote is just for simplicity.
      iden = @options[:identifier].gsub(/[:\/\\<>:"|?*']/, '_')
      raise 'Identifier must contain at least one letter or number.' unless iden =~ /\w/
      iden
    end

    # Validate a password by trying to create a task with it. If it fails, then the password is wrong.
    # Will delete the created task after.
    def validate_password password
      ts = Win32::TaskScheduler.new
      base_test_task_name = test_task_name = "Winever_test_task"
      i = 0
      # Pick a probe task name that doesn't clash with an existing task.
      while ts.exists?(test_task_name)
        i += 1
        test_task_name = "#{base_test_task_name}_#{i}"
      end
      trigger = { :start_year => 2000,
        :start_month => 6,
        :start_day => 12,
        :start_hour => 13,
        :start_minute => 17,
        :trigger_type => Win32::TaskScheduler::TASK_TIME_TRIGGER_ONCE}
      ts.new_work_item(test_task_name, trigger)
      valid = false
      begin
        ts.password = password
        ts.application_name = "cmd"
        valid = true
      rescue
        # NOTE(review): `<PASSWORD>` is a redaction artifact in this copy of
        # the source and is NOT valid Ruby — the original assignment (most
        # plausibly `ts.password = password` or nil) must be restored before
        # this file can load.
        ts.password = <PASSWORD>
        valid = false
      end
      ts.delete(test_task_name)
      return valid
    end
  end
end
|
MaxLap/winever
|
test/unit/cron_time_test.rb
|
<reponame>MaxLap/winever
require 'test_helper'
# Specs for Winever::CronTime's supported-schedule detection and trigger
# generation.
describe Winever::CronTime do
  it "must support daily jobs with single specific time" do
    Winever::CronTime.new('12 10 * * *').supported?.must_equal true
  end
  it "must only support 5 time parts" do
    Winever::CronTime.new('12 10 * *').supported?.must_equal false
    Winever::CronTime.new('12 10 * * * *').supported?.must_equal false
  end
  it "must return a trigger" do
    # Trigger hashes reference Win32::TaskScheduler constants, so this can
    # only run on Windows (`windows?` presumably comes from test_helper —
    # confirm).
    skip unless windows?
    Winever::CronTime.new('12 10 * * *').triggers.size.must_equal 1
  end
  describe "current limitations" do
    it "doesn't currently support daily jobs without single specific time" do
      Winever::CronTime.new('12 10,20 * * *').supported?.must_equal false
      Winever::CronTime.new('*/2 10 * * *').supported?.must_equal false
    end
    it "doesn't support jobs that are not daily" do
      Winever::CronTime.new('12 10 1 * *').supported?.must_equal false
      Winever::CronTime.new('12 10 */2 * *').supported?.must_equal false
      Winever::CronTime.new('12 10 3,4,5 * *').supported?.must_equal false
      Winever::CronTime.new('12 10 * 1 *').supported?.must_equal false
      Winever::CronTime.new('12 10 * */2 *').supported?.must_equal false
      Winever::CronTime.new('12 10 * 3,4,5 *').supported?.must_equal false
      Winever::CronTime.new('12 10 * * 1').supported?.must_equal false
      Winever::CronTime.new('12 10 * * */2').supported?.must_equal false
      Winever::CronTime.new('12 10 * * 3,4,5').supported?.must_equal false
    end
  end
end
|
MaxLap/winever
|
lib/winever.rb
|
<reponame>MaxLap/winever
require 'winever/version'
require 'whenever'
# A very tiny monkey patch of Whenever, adding some helper functions
# available inside a schedule.rb file.
module Whenever
  class JobList
    # True when the schedule is being evaluated by Winever rather than plain
    # Whenever. Lets a schedule define Windows-only or Linux-only jobs.
    def winever?
      Winever::WheneverInterface.run_from_winever?
    end

    # If transitionning to Winever and you already have scheduled tasks that
    # you also want removed when installing your Winever schedule, you can
    # give their paths to this function and Winever will take care of it!
    def remove_existing_tasks(*names)
      # Parenthesized splat call — the bare `method *names` form triggers
      # Ruby's "`*' interpreted as argument prefix" ambiguity warning.
      Winever::WheneverInterface.remove_existing_tasks(*names)
    end
  end
end
module Winever
  # Components are autoloaded so `require 'winever'` stays cheap; each
  # constant loads its file on first reference.
  autoload :CommandLine, 'winever/command_line'
  autoload :CronEntry, 'winever/cron_entry'
  autoload :CronTime, 'winever/cron_time'
  autoload :TaskManager, 'winever/task_manager'
  autoload :WheneverInterface, 'winever/whenever_interface'
end
|
MaxLap/winever
|
lib/winever/setup_schedule.rb
|
<gh_stars>0
# Prepended to the user's schedule.rb by WheneverInterface.raw_cron.
# Evaluated inside Whenever's JobList, so `winever?`, `set` and `job_type`
# are available. Redefines the job types to emit Winever's pipe-delimited
# internal format (schedule|task_folder|task_name|working_directory|command)
# instead of real crontab lines.
if winever?
  set :job_template, "|:job"
  # Overwrite to put tasks in a different subfolder of the task scheduler.
  # Right now, anything other than \\ will break clear_tasks, so don't change folder for now.
  set :task_folder, "\\"
  job_type :command, ":task_folder|:task_name||:task :output"
  job_type :rake, ":task_folder|:task_name|:path|:bundle_command rake :task --silent :environment_variable=:environment :output"
  job_type :script, ":task_folder|:task_name|:path|:bundle_command ruby script/:task :environment_variable=:environment :output"
  job_type :runner, ":task_folder|:task_name|:path|ruby :runner_command -e :environment ':task' :output"
end
|
Fizziology/sina-weibo-rules
|
test/test_rule.rb
|
require 'helper'
# Unit tests for SinaWeibo::Rule serialization and validation.
class TestRule < Test::Unit::TestCase
  context "SinaWeibo Rules" do
    context "without tags" do
      setup do
        @rule = SinaWeibo::Rule.new( '"bangor slov"')
      end
      # NOTE(review): these assertions expect a "value" JSON key, but
      # Rule#as_json in lib/sina-weibo-rules/rule.rb serializes under
      # "query" — one of the two sides is stale.
      should "allow creation of quoted rule" do
        assert{ @rule.value == "\"bangor slov\"" }
        assert{ @rule.to_json == "{\"value\":\"\\\"bangor slov\\\"\"}" }
        assert{ JSON.parse( @rule.to_json )["value"] == "\"bangor slov\"" }
      end
      should 'not have tag key as json' do
        assert{ @rule.to_json == "{\"value\":\"\\\"bangor slov\\\"\"}" }
        assert{ JSON.parse( @rule.to_json )["tag"].nil? }
      end
      should 'not have a tag value' do
        assert{ @rule.tag.nil? }
      end
    end
    context "with more than 10 phrases" do
      setup do
        @rule = SinaWeibo::Rule.new('mirror mirror clip -watch -see -project -mirror -relativity -armie -julia -lily -trailer -movie' )
      end
      # NOTE(review): this only passes if Rule#valid? enforces the 10-phrase
      # limit (validate_phrase_count); as written, valid? checks length only.
      should "raise an invalid length error" do
        assert{ !@rule.valid? }
      end
    end
    context "with tags" do
      setup do
        @rule = SinaWeibo::Rule.new( "gorgon" , "scary" )
      end
      should "convert to json" do
        assert{ JSON.parse( @rule.to_json )['tag'] == "scary" }
        assert{ JSON.parse( @rule.to_json )['value'] == "gorgon" }
      end
      should "have a tag value" do
        assert{ @rule.tag == "scary" }
      end
    end
  end
end
|
Fizziology/sina-weibo-rules
|
lib/sina-weibo-rules/version.rb
|
module SinaWeibo
  # Gem version, read from the VERSION file at the repository root.
  # BUG FIX: the path is now resolved relative to this file instead of the
  # process working directory — `File.open('VERSION')` only worked when the
  # process was started from the repo root.
  VERSION = File.binread(File.expand_path('../../VERSION', __dir__))
end
|
Fizziology/sina-weibo-rules
|
lib/sina-weibo-rules/api.rb
|
<filename>lib/sina-weibo-rules/api.rb
module SinaWeibo
  # Thin wrappers around the keyword-rules HTTP endpoints. Mixed into
  # SinaWeibo::Rules, which supplies the HTTParty class methods
  # (post/delete/get) reached through self.class.
  module API
    # Registers the given rules; returns a SinaWeibo::Response.
    def add(rules = [])
      payload = { queries: rules }
      SinaWeibo::Response.new self.class.post('/rules', body: payload.to_json)
    end

    # Unregisters the given rules; returns a SinaWeibo::Response.
    def remove(rules = [])
      payload = { queries: rules }
      SinaWeibo::Response.new self.class.delete('/rules', body: payload.to_json)
    end

    # Fetches every currently registered rule.
    def list
      SinaWeibo::Response.new self.class.get('/rules')
    end

    # Removes every registered rule; the pause gives the remote API time
    # between the list and the delete.
    def delete_all!
      current_rules = list.rules
      sleep 3
      remove(current_rules)
    end
  end
end
|
Fizziology/sina-weibo-rules
|
lib/sina-weibo-rules.rb
|
require 'active_support'
require 'httparty'
require 'json'
require 'logger'
require 'sina-weibo-rules/api'
require 'sina-weibo-rules/response'
require 'sina-weibo-rules/rule'
module SinaWeibo
  # HTTParty client for the Socialgist Sina Weibo keyword-rules API.
  # Credentials are given explicitly or loaded from a per-environment YAML
  # configuration file (see #load_credentials!).
  class Rules
    include HTTParty
    include SinaWeibo::API
    headers 'Accept' => 'application/json', 'Content-Type' => 'application/json; charset=utf-8'
    format :json

    # configuration: path to a YAML credentials file, consulted when
    # username/password/uri are not all provided. timeout: request timeout
    # in seconds.
    def initialize( configuration = nil, username = nil, password = nil, uri = nil, timeout = 60 )
      @configuration_file = configuration
      unless username && password && uri
        load_credentials!
        username = @config["username"]
        password = @config["password"]
        uri ||= @config["streaming_url"]
      end
      self.class.basic_auth username , password
      self.class.base_uri uri
      self.class.default_timeout timeout
    end

    # Changes the request timeout after construction; returns HTTParty's
    # default options hash.
    def default_timeout(timeout)
      self.class.default_timeout timeout
      self.class.default_options
    end

    private

    # Loads @config for the current environment from @configuration_file.
    # NOTE(review): when neither full credentials nor a configuration path
    # are given, @configuration_file is nil and File.exist?(nil) raises
    # TypeError — callers must supply one or the other.
    def load_credentials!
      require 'yaml' # be explicit rather than relying on another gem loading it
      # File.exist? — File.exists? was deprecated and removed in Ruby 3.2.
      if File.exist?( @configuration_file )
        @config = YAML.load_file( @configuration_file )[environment.to_s]
      else
        # NOTE(review): raising bare Exception escapes `rescue StandardError`;
        # kept for compatibility with existing rescuers, but ArgumentError
        # would be more appropriate.
        raise Exception.new( <<-RUBY
You must provide a configuration file at config/sina-weibo.yml
development: &development
username: <EMAIL>
password: <PASSWORD>
account: your_account
streaming_url: 'https://api.socialgist.com/keywordapi/YOUR_ACCOUNT/sinaweibo/main'
RUBY
        )
      end
    end

    # Best-effort detection of the current runtime environment name.
    def environment
      if defined?(Rails)
        Rails.env
      elsif defined?(RAILS_ENV)
        RAILS_ENV
      elsif defined?(RACK_ENV)
        RACK_ENV
      else
        :development
      end
    end
  end
end
|
Fizziology/sina-weibo-rules
|
test/helper.rb
|
<reponame>Fizziology/sina-weibo-rules
require 'rubygems'
require 'bundler'
begin
  Bundler.setup(:default, :development)
  Bundler.require( :default, :development )
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'test/unit'
require 'logger'

$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
# BUG FIX: this project is sina-weibo-rules, but the helper still required
# 'gnip-rules' from the gem it was forked from — a LoadError, since the lib
# on the load path above is lib/sina-weibo-rules.rb.
require 'sina-weibo-rules'

class Test::Unit::TestCase
  # Shared logger for tests that want to print diagnostics.
  def logger
    Logger.new(STDOUT)
  end
end
|
Fizziology/sina-weibo-rules
|
test/test_gnip-rules.rb
|
require 'helper'
# Integration tests that hit the LIVE rules API with real credentials —
# they require network access and a valid config, and are order-dependent
# (delete_all! runs last and clears everything).
# NOTE(review): the filename (test_gnip-rules.rb) is fork residue and no
# longer matches the class.
class TestSinaWeiboRules < Test::Unit::TestCase
  context 'SinaWeibo' do
    setup do
      @weibo = SinaWeibo::Rules.new
    end
    should "allow creation of rules" do
      response = @weibo.add([SinaWeibo::Rule.new( '"new spyro"' )])
      assert{ response.response.class == Net::HTTPCreated }
    end
    should 'allow removal of rules' do
      response = @weibo.remove([SinaWeibo::Rule.new( '"new spyro"' )])
      assert{ response.response.class == Net::HTTPOK }
    end
    should 'allow creation of tagged rules' do
      response = @weibo.add([ SinaWeibo::Rule.new('#skylanders -skylanders -spyro', 'Skylanders')])
      assert{ response.response.class == Net::HTTPCreated }
    end
    should 'list all rules' do
      response = @weibo.list
      assert{ response.response.class == Net::HTTPOK }
    end
    should 'delete all rules' do
      response = @weibo.delete_all!
      assert{ response.response.class == Net::HTTPOK }
      assert{ @weibo.list["rules"].empty? }
    end
  end
end
|
Fizziology/sina-weibo-rules
|
lib/sina-weibo-rules/rule.rb
|
module SinaWeibo
  # A single keyword rule (optionally tagged) for the Sina Weibo stream.
  class Rule
    attr_accessor :value, :tag, :errors

    def initialize( v , t = nil )
      @value = v
      @tag = t
      @errors = []
    end

    # JSON representation sent to the API; the tag key is omitted when unset.
    # NOTE(review): the unit tests in test/test_rule.rb expect a "value" key
    # here, but this serializes under "query" — confirm which one the current
    # API contract requires before changing either side.
    def as_json(options={})
      o = {"query" => value}
      o.merge!( "tag" => tag ) unless tag.nil?
      return o
    end

    # True when the rule passes every validation; #errors holds one message
    # per failed check.
    #
    # BUG FIX: previously only the length check ran — validate_phrase_count
    # was defined but never called, so rules with more than 10 phrases were
    # reported valid (contradicting the "with more than 10 phrases" test).
    # Errors are also reset per call so repeated valid? calls don't
    # accumulate duplicate messages.
    def valid?
      @errors = []
      # Run both validators (no short-circuit) so all errors are collected.
      results = [validate_length, validate_phrase_count]
      results.all?
    end

    private

    # Rules are limited to 1024 characters.
    def validate_length
      if @value.length > 1024
        @errors << "Too many characters in rule - #{@value.length}. The maximum allowed is 1024"
        return false
      end
      return true
    end

    # Rules are limited to 10 clauses; a quoted phrase counts as one clause.
    def validate_phrase_count
      phrases = @value.scan( /(\"[\w\-\s]+\"|\w+\s?)/ ).count
      if phrases > 10
        @errors << "Too many clauses in phrase - #{phrases}. The maximum allowed is 10"
        return false
      end
      return true
    end
  end
end
|
Fizziology/sina-weibo-rules
|
lib/sina-weibo-rules/response.rb
|
# BUG FIX: this file previously did a top-level `include Forwardable`, which
# mixes Forwardable into Object process-wide and relied on another library
# having already required the stdlib. We now require it explicitly and only
# extend it where it is used.
require 'forwardable'

module SinaWeibo
  # Wraps an HTTParty response with convenience predicates for the rules API.
  class Response
    extend Forwardable
    # Pass the common HTTParty accessors straight through to the wrapped
    # response.
    def_delegators :@http_party_response, :response, :request, :body, :headers, :code

    attr_reader :http_party_response

    def initialize(http_party_response)
      @http_party_response = http_party_response
    end

    # The rule list embedded in a parsed payload.
    def rules
      http_party_response.parsed_response["message"]["query"]
    end

    def created?
      code == 201
    end

    def unauthorized?
      code == 401
    end

    def rate_limited?
      code == 429
    end

    def unavailable?
      code == 503
    end

    def bad_request?
      code == 400
    end

    def unprocessable?
      code == 422
    end

    def ok?
      code == 200
    end

    # True only for a 200 whose body explicitly reports status == "success".
    def success?
      ok? && http_party_response.parsed_response.has_key?("status") && http_party_response.parsed_response["status"] == "success"
    end

    # The parsed body; useful when a request failed.
    def error
      http_party_response.parsed_response
    end
  end
end
|
worlduniting/myotherskills.org
|
app/controllers/dashboard_controller.rb
|
<filename>app/controllers/dashboard_controller.rb
class DashboardController < ApplicationController
  # Only signed-in users may view the dashboard.
  before_action :authenticate_user!

  # Lists every skill for the dashboard view.
  def index
    @skills = Skill.all
  end
end
|
worlduniting/myotherskills.org
|
config/initializers/preferences.rb
|
# Be sure to restart your server when you modify this file.
# All MyOtherSkills preferences are here.

# Human-readable application name used for display/branding.
MyOtherSkills::Application.config.application_name = "myotherskills.org"
worlduniting/myotherskills.org
|
test/helpers/skills_helper_test.rb
|
require 'test_helper'

# Placeholder for SkillsHelper unit tests; no helper behavior is covered yet.
class SkillsHelperTest < ActionView::TestCase
end
|
worlduniting/myotherskills.org
|
config/routes.rb
|
MyOtherSkills::Application.routes.draw do
  resources :skills
  # Users sign up/in via Devise, with a custom registrations controller
  # (supports username/avatar updates without re-entering the password).
  devise_for :users, :controllers => { :registrations => :registrations }
  devise_for :admins
  get '/token' => 'home#token', as: :token
  get '/admins/manage' => 'admins#manage', as: :manage
  get '/dashboard' => 'dashboard#index', as: :dashboard
  # Public profile pages, e.g. /u/alice.
  get '/u/:username' => 'users#show', as: :user_profile
  resources :home, only: :index
  resources :admins
  # Signed-in users land on the skills list; everyone else on the home page.
  authenticated :user do
    root :to => "skills#index", as: :authenticated_root
  end
  root :to => "home#index"
end
|
worlduniting/myotherskills.org
|
app/controllers/users_controller.rb
|
class UsersController < ApplicationController
  # Public profile page, looked up by username (routed as /u/:username).
  def show
    # find_by returns nil when no user matches — same result as the old
    # where(...).first, but idiomatic and without an intermediate relation.
    # NOTE(review): @user is nil for unknown usernames; the view must handle
    # that (or this should render a 404).
    @user = User.find_by(username: params[:username])
  end
end
|
worlduniting/myotherskills.org
|
app/views/skills/show.json.jbuilder
|
<filename>app/views/skills/show.json.jbuilder
# JSON representation of a single Skill (skills#show).
json.extract! @skill, :id, :name, :experience, :level, :description, :created_at, :updated_at
|
worlduniting/myotherskills.org
|
db/migrate/20140319054205_create_skills.rb
|
# Creates the skills table with its descriptive columns and explicit
# timestamp columns.
class CreateSkills < ActiveRecord::Migration
  def change
    create_table :skills do |t|
      t.string   :name
      t.integer  :experience
      t.integer  :level
      t.text     :description
      t.datetime :created_at
      t.datetime :updated_at
    end
  end
end
|
worlduniting/myotherskills.org
|
app/controllers/registrations_controller.rb
|
<gh_stars>0
# Custom Devise registrations controller: lets users update their profile
# without re-entering the password unless email, password or username change.
class RegistrationsController < Devise::RegistrationsController
  def update
    @user = User.find(current_user.id)
    successfully_updated = if needs_password?(@user, params)
      @user.update_with_password(user_params)
    else
      # remove the virtual current_password attribute update_without_password
      # doesn't know how to ignore it
      params[:user].delete(:current_password)
      @user.update_without_password(user_params)
    end
    if successfully_updated
      set_flash_message :notice, :updated
      # Sign in the user bypassing validation in case his password changed
      # NOTE(review): `sign_in ..., :bypass => true` is the pre-Devise-4 API;
      # newer Devise replaced it with bypass_sign_in — confirm the pinned
      # Devise version before upgrading.
      sign_in @user, :bypass => true
      redirect_to after_update_path_for(@user)
    else
      render "edit"
    end
  end

  private

  # check if we need password to update user data
  # ie if password or email was changed
  # extend this as needed
  def needs_password?(user, params)
    user.email != params[:user][:email] ||
      params[:user][:password].present? ||
      user.username != params[:user][:username]
  end

  # Using a private method to encapsulate the permissible parameters is
  # just a good pattern since you'll be able to reuse the same permit
  # list between create and update. Also, you can specialize this method
  # with per-user checking of permissible attributes.
  def user_params
    params.require(:user).permit(:avatar, :email, :current_password, :password, :password_confirmation, :username)
  end
end
|
worlduniting/myotherskills.org
|
db/migrate/20140319021156_add_postal_code_to_users.rb
|
<filename>db/migrate/20140319021156_add_postal_code_to_users.rb
class AddPostalCodeToUsers < ActiveRecord::Migration
  def change
    # Postal Code for Geo-location of users. Stored as a string (not an
    # integer) so leading zeros and alphanumeric codes survive.
    add_column :users, :postal_code, :string
  end
end
|
worlduniting/myotherskills.org
|
app/controllers/home_controller.rb
|
class HomeController < ApplicationController
  # Only the token action requires a signed-in user; any other action on
  # this controller stays public.
  before_action :authenticate_user!, only: :token
  # View-only action: renders the token page for the current user.
  def token
  end
end
|
worlduniting/myotherskills.org
|
app/controllers/admins_controller.rb
|
class AdminsController < ApplicationController
  # Every action here requires a signed-in admin (authenticate_admin!
  # implies a separate Devise scope from regular users).
  before_action :authenticate_admin!
  # View-only action: renders the admin management page.
  def manage
  end
end
|
nerdgeschoss/pixelpress
|
lib/pixelpress/renderers/test_renderer.rb
|
<gh_stars>1-10
# Test double for the PDF renderers: ignores the supplied HTML and returns
# the raw bytes of a canned PDF fixture shipped next to this file.
class Pixelpress::TestRenderer
  def render(html)
    fixture = File.join(__dir__, "test.pdf")
    File.binread(fixture)
  end
end
|
nerdgeschoss/pixelpress
|
lib/pixelpress/base.rb
|
require 'action_controller'
require_relative 'renderers/weasyprint_renderer'
require_relative 'renderers/test_renderer'
require_relative 'instance_invocation'
require_relative 'rendering'
module Pixelpress
  # Base class every printer inherits from: an ActionController that can
  # render templates into documents (Rendering) and be instantiated outside
  # a normal request cycle (InstanceInvocation).
  class Base < ActionController::Base
    extend InstanceInvocation
    include Rendering
  end
end
|
nerdgeschoss/pixelpress
|
lib/generators/rspec/templates/printer_spec.rb
|
<filename>lib/generators/rspec/templates/printer_spec.rb
<%# Pick rails_helper when the target app has one (rspec-rails >= 3),
    otherwise fall back to spec_helper. File.exist? replaces File.exists?,
    which was deprecated for years and removed in Ruby 3.2. -%>
require '<%= File.exist?('spec/rails_helper.rb') ? 'rails_helper' : 'spec_helper' %>'
describe <%= class_name %>Printer, pending: true do
  it "should be tested" do
    raise "implement your rspec tests"
  end
end
|
nerdgeschoss/pixelpress
|
lib/pixelpress/rendering.rb
|
module Pixelpress
  # Mixed into printer controllers (see Pixelpress::Base): turns a rendered
  # template into a Pixelpress::Document via a pluggable PDF renderer.
  module Rendering
    module ClassMethods
      # Lets a printer class swap in another renderer (e.g. TestRenderer).
      attr_writer :default_renderer
      # Falls back to WeasyPrint when no renderer was assigned.
      def default_renderer
        @default_renderer ||= WeasyPrintRenderer.new
      end
    end
    def self.included(base)
      base.extend(ClassMethods)
    end
    # Lazily builds the Document from the rendered HTML of #template.
    # file_name is picked up from the including printer only if it defines it.
    def document
      @document ||= Document.new render_to_string(template), renderer, file_name: try(:file_name)
    end
    protected
    # An instance-level renderer override wins over the class default.
    def renderer
      @renderer || self.class.default_renderer
    end
    # Template path, e.g. Auth::UserPrinter + @template_name =>
    # "printers/auth/user/<template_name>".
    def template
      ['printers', controller_path.sub('_printer', ''), @template_name].join('/')
    end
  end
end
|
nerdgeschoss/pixelpress
|
lib/pixelpress/preview.rb
|
module Pixelpress
  # Base class for printer previews (mirrors ActionMailer previews):
  # subclasses live under spec/printers/previews and define one public
  # method per preview they expose.
  class Preview
    # Loads every *_preview.rb under spec/printers/previews and returns one
    # instance per preview class found there.
    def self.all
      Dir[Rails.root.join('spec', 'printers', 'previews', '**', '*_preview.rb')].map do |file|
        require_dependency file
        # "auth/user_preview.rb" => Auth::UserPreview.new
        file.split('printers/previews/').last.sub('.rb', '').classify.constantize.new
      end
    end
    # The preview names: public methods declared by the subclass itself.
    def previews
      methods - Object.methods - [:previews, :printer_name]
    end
    # URL-safe identifier, e.g. Auth::UserPreview => "auth_user_preview".
    def printer_name
      self.class.name.underscore.tr('/', '_')
    end
  end
end
|
nerdgeschoss/pixelpress
|
spec/lib/generators/test/generator_spec/printer_generator_spec.rb
|
<gh_stars>1-10
require 'spec_helper'
# Exercises the pixelpress printer generator against a throwaway
# destination directory.
RSpec.describe Pixelpress::Generators::PrinterGenerator do
  path = File.expand_path('../test_destination', __FILE__)
  destination path
  arguments %w(Auth::UserPrinter user_data)
  before(:each) do
    prepare_destination
    # The generator reads Rails.root and config/routes.rb, so fake both.
    allow(Rails).to receive(:root).and_return Pathname.new(path)
    FileUtils.mkdir_p "#{path}/config"
    File.write("#{path}/config/routes.rb", 'mount Pixelpress::Engine => "rails" if Rails.env.development?')
    run_generator
  end
  it 'generates all printer files in app folder' do
    assert_file 'app/printers/application_printer.rb'
  end
  it 'test double suffix printer' do
    # "Auth::UserPrinter" must not be expanded into "user_printer_printer.rb".
    assert_file 'app/printers/auth/user_printer.rb'
  end
end
|
nerdgeschoss/pixelpress
|
app/controllers/pixelpress/printers_controller.rb
|
# Serves printer previews: index lists every Preview subclass, show renders
# one named preview as HTML or inline PDF.
class Pixelpress::PrintersController < ActionController::Base
  def index
    @printers = Pixelpress::Preview.all
  end

  def show
    klass = params[:printer_id]
    method = params[:id]
    # Look the preview up directly instead of materialising a full
    # name => instance Hash just to fetch a single entry. A miss leaves
    # preview nil and raises NoMethodError below, as before.
    preview = Pixelpress::Preview.all.find { |e| e.printer_name == klass }
    # public_send restricts the user-supplied method name to the preview's
    # public API; preview methods are public, so behavior is unchanged for
    # legitimate requests while private/internal methods become unreachable.
    printer = preview.public_send(method)
    respond_to do |format|
      format.html { render html: printer.html }
      format.pdf { send_data printer.pdf.read, disposition: 'inline', type: 'application/pdf' }
    end
  end
end
|
nerdgeschoss/pixelpress
|
lib/pixelpress/fake_file.rb
|
<gh_stars>1-10
module Pixelpress
  # In-memory stand-in for an uploaded PDF file, backed by a StringIO, with
  # the original_filename/content_type interface upload handlers expect.
  class FakeFile < StringIO
    # Writers only: the reader methods below supply defaults when the ivar
    # is unset, so the readers generated by the former attr_accessor were
    # immediately shadowed — dead code.
    attr_writer :original_filename, :content_type

    # @param data [String] raw file contents
    # @param options [Hash] optional :original_filename and :content_type
    def initialize(data, options = {})
      @original_filename = options[:original_filename]
      @content_type = options[:content_type]
      super data
    end

    # Filename reported to consumers; defaults to "document.pdf".
    def original_filename
      @original_filename || "document.pdf"
    end

    # MIME type reported to consumers; defaults to "application/pdf".
    def content_type
      @content_type || "application/pdf"
    end
  end
end
|
nerdgeschoss/pixelpress
|
lib/generators/pixelpress/printer/templates/printer.rb
|
<% module_namespacing do -%>
class <%= class_name %>Printer < ApplicationPrinter<% passed_methods.each do |m| %>
  def <%= m %>
    # TODO: implement this printer method
  end<% end %>
end
<% end -%>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.