diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
deleted file mode 100644
index 70e22a1..0000000
--- a/Godeps/Godeps.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
-	"ImportPath": "github.com/KunTengRom/xfrps",
-	"GoVersion": "go1.8",
-	"GodepVersion": "v79",
-	"Packages": [
-		"./..."
-	],
-	"Deps": [
-		{
-			"ImportPath": "github.com/davecgh/go-spew/spew",
-			"Comment": "v1.1.0",
-			"Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
-		},
-		{
-			"ImportPath": "github.com/docopt/docopt-go",
-			"Comment": "0.6.2",
-			"Rev": "784ddc588536785e7299f7272f39101f7faccc3f"
-		},
-		{
-			"ImportPath": "github.com/fatedier/beego/logs",
-			"Comment": "v1.7.2-72-gf73c369",
-			"Rev": "f73c3692bbd70a83728cb59b2c0423ff95e4ecea"
-		},
-		{
-			"ImportPath": "github.com/golang/snappy",
-			"Rev": "5979233c5d6225d4a8e438cdd0b411888449ddab"
-		},
-		{
-			"ImportPath": "github.com/julienschmidt/httprouter",
-			"Comment": "v1.1-41-g8a45e95",
-			"Rev": "8a45e95fc75cb77048068a62daed98cc22fdac7c"
-		},
-		{
-			"ImportPath": "github.com/pkg/errors",
-			"Comment": "v0.8.0-5-gc605e28",
-			"Rev": "c605e284fe17294bda444b34710735b29d1a9d90"
-		},
-		{
-			"ImportPath": "github.com/pmezard/go-difflib/difflib",
-			"Comment": "v1.0.0",
-			"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
-		},
-		{
-			"ImportPath": "github.com/rakyll/statik/fs",
-			"Comment": "v0.1.0",
-			"Rev": "274df120e9065bdd08eb1120e0375e3dc1ae8465"
-		},
-		{
-			"ImportPath": "github.com/stretchr/testify/assert",
-			"Comment": "v1.1.4-25-g2402e8e",
-			"Rev": "2402e8e7a02fc811447d11f881aa9746cdc57983"
-		},
-		{
-			"ImportPath": "github.com/vaughan0/go-ini",
-			"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
-		},
-		{
-			"ImportPath": "github.com/xtaci/smux",
-			"Comment": "v1.0.5-8-g2de5471",
-			"Rev": "2de5471dfcbc029f5fe1392b83fe784127c4943e"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/pbkdf2",
-			"Rev": "1f22c0103821b9390939b6776727195525381532"
-		}
-	]
-}
diff --git a/Godeps/Readme b/Godeps/Readme
deleted file mode 100644
index 4cdaa53..0000000
--- a/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/Makefile b/Makefile
index 0eeb2e4..56eabcd 100644
--- a/Makefile
+++ b/Makefile
@@ -18,11 +18,11 @@ fmt:
 	go fmt ./...
 
 xfrps:
-	go build -o bin/xfrps ./cmd/frps
+	go build -o bin/xfrps ./cmd/xfrps
 	@cp -rf ./assets/static ./bin
 
 xfrpc:
-	go build -o bin/xfrpc ./cmd/frpc
+	go build -o bin/xfrpc ./cmd/xfrpc
 
 test: gotest
 
diff --git a/Makefile.cross-compiles b/Makefile.cross-compiles
index aa981b6..b161d18 100644
--- a/Makefile.cross-compiles
+++ b/Makefile.cross-compiles
@@ -7,26 +7,26 @@ all: build
 build: app
 
 app:
-	env CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_darwin_amd64 ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_darwin_amd64 ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_386 ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_386 ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_amd64 ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_amd64 ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_arm ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_arm ./cmd/frps
-	env CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_windows_386.exe ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrps_windows_386.exe ./cmd/frps
-	env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_windows_amd64.exe ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_windows_amd64.exe ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mips64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mips64 ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mips64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mips64 ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mips64le go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mips64le ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mips64le go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mips64le ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mips go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mips ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mips go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mips ./cmd/frps
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mipsle ./cmd/frpc
-	env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mipsle ./cmd/frps
+	env CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_darwin_amd64 ./cmd/xfrpc
+	env CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_darwin_amd64 ./cmd/xfrps
+	env CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_386 ./cmd/xfrpc
+	env CGO_ENABLED=0 GOOS=linux GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_386 ./cmd/xfrps
+	env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_amd64 ./cmd/xfrpc
+	env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_amd64 ./cmd/xfrps
+	env CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_arm ./cmd/xfrpc
+	env CGO_ENABLED=0 GOOS=linux GOARCH=arm go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_arm ./cmd/xfrps
+	env CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -ldflags "$(LDFLAGS)" -o 
./xfrpc_windows_386.exe ./cmd/xfrpc + env CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -ldflags "$(LDFLAGS)" -o ./xfrps_windows_386.exe ./cmd/xfrps + env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_windows_amd64.exe ./cmd/xfrpc + env CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_windows_amd64.exe ./cmd/xfrps + env CGO_ENABLED=0 GOOS=linux GOARCH=mips64 go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mips64 ./cmd/xfrpc + env CGO_ENABLED=0 GOOS=linux GOARCH=mips64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mips64 ./cmd/xfrps + env CGO_ENABLED=0 GOOS=linux GOARCH=mips64le go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mips64le ./cmd/xfrpc + env CGO_ENABLED=0 GOOS=linux GOARCH=mips64le go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mips64le ./cmd/xfrps + env CGO_ENABLED=0 GOOS=linux GOARCH=mips go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mips ./cmd/xfrpc + env CGO_ENABLED=0 GOOS=linux GOARCH=mips go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mips ./cmd/xfrps + env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build -ldflags "$(LDFLAGS)" -o ./xfrpc_linux_mipsle ./cmd/xfrpc + env CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_mipsle ./cmd/xfrps temp: - env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_amd64 ./cmd/frps + env CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o ./xfrps_linux_amd64 ./cmd/xfrps diff --git a/assets/assets.go b/assets/assets.go index 7eb264d..d093fa9 100644 --- a/assets/assets.go +++ b/assets/assets.go @@ -25,7 +25,7 @@ import ( "github.com/rakyll/statik/fs" - _ "github.com/KunTengRom/xfrps/assets/statik" + _ "github.com/liudf0716/xfrps/assets/statik" ) var ( diff --git a/assets/static/index.js b/assets/static/index.js index cf29e26..8fa0145 100644 --- a/assets/static/index.js +++ b/assets/static/index.js @@ -16,7 +16,7 @@ var S=i(13),A=i(477),M=i(251),k=i(25),T=i(478),I=i(12),C=i(14),P=i(95),D=i(33),L * LICENSE * https://github.com/ecomfe/zrender/blob/master/LICENSE.txt */ -var o=i(183),r=i(13),a=i(0),s=i(548),l=i(551),c=i(552),u=i(559),h=!r.canvasSupported,d={canvas:i(550)},p={},f={};f.version="3.6.0",f.init=function(e,t){var i=new g(o(),e,t);return p[i.id]=i,i},f.dispose=function(e){if(e)e.dispose();else{for(var t in p)p.hasOwnProperty(t)&&p[t].dispose();p={}}return f},f.getInstance=function(e){return p[e]},f.registerPainter=function(e,t){d[e]=t};var g=function(e,t,i){i=i||{},this.dom=t,this.id=e;var n=this,o=new l,p=i.renderer;if(h){if(!d.vml)throw new Error("You need to require 'zrender/vml/vml' to support IE8");p="vml"}else p&&d[p]||(p="canvas");var f=new d[p](t,o,i);this.storage=o,this.painter=f;var g=r.node?null:new u(f.getViewportRoot());this.handler=new s(o,f,g,f.root),this.animation=new c({stage:{update:a.bind(this.flush,this)}}),this.animation.start(),this._needsRefresh;var m=o.delFromStorage,v=o.addToStorage;o.delFromStorage=function(e){m.call(o,e),e&&e.removeSelfFromZr(n)},o.addToStorage=function(e){v.call(o,e),e.addSelfToZr(n)}};g.prototype={constructor:g,getId:function(){return 
this.id},add:function(e){this.storage.addRoot(e),this._needsRefresh=!0},remove:function(e){this.storage.delRoot(e),this._needsRefresh=!0},configLayer:function(e,t){this.painter.configLayer(e,t),this._needsRefresh=!0},refreshImmediately:function(){this._needsRefresh=!1,this.painter.refresh(),this._needsRefresh=!1},refresh:function(){this._needsRefresh=!0},flush:function(){this._needsRefresh&&this.refreshImmediately(),this._needsRefreshHover&&this.refreshHoverImmediately()},addHover:function(e,t){this.painter.addHover&&(this.painter.addHover(e,t),this.refreshHover())},removeHover:function(e){this.painter.removeHover&&(this.painter.removeHover(e),this.refreshHover())},clearHover:function(){this.painter.clearHover&&(this.painter.clearHover(),this.refreshHover())},refreshHover:function(){this._needsRefreshHover=!0},refreshHoverImmediately:function(){this._needsRefreshHover=!1,this.painter.refreshHover&&this.painter.refreshHover()},resize:function(e){e=e||{},this.painter.resize(e.width,e.height),this.handler.resize()},clearAnimation:function(){this.animation.clear()},getWidth:function(){return this.painter.getWidth()},getHeight:function(){return this.painter.getHeight()},pathToImage:function(e,t){return this.painter.pathToImage(e,t)},setCursorStyle:function(e){this.handler.setCursorStyle(e)},findHover:function(e,t){return this.handler.findHover(e,t)},on:function(e,t,i){this.handler.on(e,t,i)},off:function(e,t){this.handler.off(e,t)},trigger:function(e,t){this.handler.trigger(e,t)},clear:function(){this.storage.delRoot(),this.painter.clear()},dispose:function(){this.animation.stop(),this.clear(),this.storage.dispose(),this.painter.dispose(),this.handler.dispose(),this.animation=this.storage=this.painter=this.handler=null,n(this.id)}},e.exports=f},function(e,t,i){"use strict";var n=i(17),o=i(535),r=i(517),a=i.n(r),s=i(521),l=i.n(s),c=i(522),u=i.n(c),h=i(518),d=i.n(h),p=i(519),f=i.n(p),g=i(520),m=i.n(g),v=i(516),x=i.n(v),y=i(515),_=i.n(y),b=i(523),w=i.n(b);n.default.use(o.a),t.a=new o.a({routes:[{path:"/",name:"Overview",component:a.a},{path:"/client/offline",name:"OfflineClient",component:_.a},{path:"/client/online",name:"OnlineClient",component:x.a},{path:"/proxies/tcp",name:"ProxiesTcp",component:l.a},{path:"/proxies/udp",name:"ProxiesUdp",component:u.a},{path:"/proxies/ftp",name:"ProxiesFtp",component:d.a},{path:"/proxies/http",name:"ProxiesHttp",component:f.a},{path:"/proxies/https",name:"ProxiesHttps",component:m.a},{path:"/search",name:"Search",component:w.a}]})},function(e,t,i){var n=i(236);"string"==typeof n&&(n=[[e.i,n,""]]);i(172)(n,{});n.locals&&(e.exports=n.locals)},function(e,t,i){var n=i(237);"string"==typeof n&&(n=[[e.i,n,""]]);i(172)(n,{});n.locals&&(e.exports=n.locals)},function(e,t,i){i(539);var n=i(20)(i(221),i(527),null,null);e.exports=n.exports},function(e,t){!function(e){"use strict";function t(e){if("string"!=typeof e&&(e=String(e)),/[^a-z0-9\-#$%&'*+.\^_`|~]/i.test(e))throw new TypeError("Invalid character in header field name");return e.toLowerCase()}function i(e){return"string"!=typeof e&&(e=String(e)),e}function n(e){var t={next:function(){var t=e.shift();return{done:void 0===t,value:t}}};return v.iterable&&(t[Symbol.iterator]=function(){return t}),t}function o(e){this.map={},e instanceof o?e.forEach(function(e,t){this.append(t,e)},this):Array.isArray(e)?e.forEach(function(e){this.append(e[0],e[1])},this):e&&Object.getOwnPropertyNames(e).forEach(function(t){this.append(t,e[t])},this)}function r(e){if(e.bodyUsed)return Promise.reject(new TypeError("Already 
read"));e.bodyUsed=!0}function a(e){return new Promise(function(t,i){e.onload=function(){t(e.result)},e.onerror=function(){i(e.error)}})}function s(e){var t=new FileReader,i=a(t);return t.readAsArrayBuffer(e),i}function l(e){var t=new FileReader,i=a(t);return t.readAsText(e),i}function c(e){for(var t=new Uint8Array(e),i=new Array(t.length),n=0;n-1?t:e}function p(e,t){t=t||{};var i=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,i||null==e._bodyInit||(i=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&i)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(i)}function f(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var i=e.split("="),n=i.shift().replace(/\+/g," "),o=i.join("=").replace(/\+/g," ");t.append(decodeURIComponent(n),decodeURIComponent(o))}}),t}function g(e){var t=new o;return e.split(/\r?\n/).forEach(function(e){var i=e.split(":"),n=i.shift().trim();if(n){var o=i.join(":").trim();t.append(n,o)}}),t}function m(e,t){t||(t={}),this.type="default",this.status="status"in t?t.status:200,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var v={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(v.arrayBuffer)var x=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],y=function(e){return e&&DataView.prototype.isPrototypeOf(e)},_=ArrayBuffer.isView||function(e){return e&&x.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,n){e=t(e),n=i(n);var o=this.map[e];this.map[e]=o?o+","+n:n},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,n){this.map[t(e)]=i(n)},o.prototype.forEach=function(e,t){for(var i in this.map)this.map.hasOwnProperty(i)&&e.call(t,this.map[i],i,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,i){e.push(i)}),n(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),n(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,i){e.push([i,t])}),n(e)},v.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var b=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new p(this,{body:this._bodyInit})},h.call(p.prototype),h.call(m.prototype),m.prototype.clone=function(){return new m(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},m.error=function(){var e=new m(null,{status:0,statusText:""});return e.type="error",e};var w=[301,302,303,307,308];m.redirect=function(e,t){if(-1===w.indexOf(t))throw new 
RangeError("Invalid status code");return new m(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=p,e.Response=m,e.fetch=function(e,t){return new Promise(function(i,n){var o=new p(e,t),r=new XMLHttpRequest;r.onload=function(){var e={status:r.status,statusText:r.statusText,headers:g(r.getAllResponseHeaders()||"")};e.url="responseURL"in r?r.responseURL:e.headers.get("X-Request-URL");var t="response"in r?r.response:r.responseText;i(new m(t,e))},r.onerror=function(){n(new TypeError("Network request failed"))},r.ontimeout=function(){n(new TypeError("Network request failed"))},r.open(o.method,o.url,!0),"include"===o.credentials&&(r.withCredentials=!0),"responseType"in r&&v.blob&&(r.responseType="blob"),o.headers.forEach(function(e,t){r.setRequestHeader(t,e)}),r.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},,,,,,,,,,,,,,,,,,,,,,,function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={methods:{handleSelect:function(e,t){""==e&&window.open("https://github.com/KunTengRom/xfrps")}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=(i.n(n),i(40)),r=i.n(o),a=i(111);t.default={data:function(){return{clients:null,parentTotalPage:1,parentCurrentPage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},components:{pagination:r.a},methods:{fetchData:function(e){var t=this;0==e?fetch("/api/client/offline",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/client/offline/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}):fetch("/api/client/offline/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=(i.n(n),i(40)),r=i.n(o),a=i(111);t.default={data:function(){return{clients:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},components:{pagination:r.a},methods:{fetchData:function(e){var t=this;0==e?fetch("/api/client/online",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/client/online/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}):fetch("/api/client/online/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}}}},function(e,t,i){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(110);t.default={data:function(){return{version:"",vhost_http_port:"",vhost_https_port:"",auth_timeout:"",subdomain_host:"",max_pool_count:"",heart_beat_timeout:"",client_counts:"",cur_conns:"",proxy_counts:""}},created:function(){this.fetchData()},watch:{$route:"fetchData"},methods:{fetchData:function(){var e=this;fetch("/api/serverinfo",{credentials:"include"}).then(function(e){return e.json()}).then(function(t){e.version=t.version,e.vhost_http_port=t.vhost_http_port,0==e.vhost_http_port&&(e.vhost_http_port="disable"),e.vhost_https_port=t.vhost_https_port,0==e.vhost_https_port&&(e.vhost_https_port="disable"),e.auth_timeout=t.auth_timeout,e.subdomain_host=t.subdomain_host,e.max_pool_count=t.max_pool_count,e.heart_beat_timeout=t.heart_beat_timeout,e.client_counts=t.client_counts,e.cur_conns=t.cur_conns,e.proxy_counts=0,null!=t.proxy_type_count&&(null!=t.proxy_type_count.tcp&&(e.proxy_counts+=t.proxy_type_count.tcp),null!=t.proxy_type_count.udp&&(e.proxy_counts+=t.proxy_type_count.udp),null!=t.proxy_type_count.ftp&&(e.proxy_counts+=t.proxy_type_count.ftp),null!=t.proxy_type_count.http&&(e.proxy_counts+=t.proxy_type_count.http),null!=t.proxy_type_count.https&&(e.proxy_counts+=t.proxy_type_count.https)),i.i(n.b)("traffic",t.total_traffic_in,t.total_traffic_out),i.i(n.c)("proxies",t)}).catch(function(t){e.$message({showClose:!0,message:"Get server info from frps failed!",type:"warning"})})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(58),a=i.n(r),s=i(40),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;0==e?fetch("/api/proxy/ftp",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentCurrentpage=1,t.parentTotalPage=e.total_page,fetch("/api/proxy/ftp/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.b(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(this.parentCurrentpage=e,fetch("/api/proxy/ftp/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.b(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}}))}},components:{"my-traffic-chart":a.a,pagination:l.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(40),a=i.n(r),s=i(58),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,vhost_http_port:"",subdomain_host:"",parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;fetch("/api/serverinfo",{credentials:"include"}).then(function(e){return 
e.json()}).then(function(i){t.vhost_http_port=i.vhost_http_port,t.subdomain_host=i.subdomain_host,null!=t.vhost_http_port&&0!=t.vhost_http_port&&(0==e?fetch("/api/proxy/http",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/proxy/http/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.a(s,t.vhost_http_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(t.parentCurrentpage=e,fetch("/api/proxy/http/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.a(s,t.vhost_http_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})))})}},components:{"my-traffic-chart":l.a,pagination:a.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(40),a=i.n(r),s=i(58),l=i.n(s);i(50);t.default={data:function(){return{proxies:null,vhost_https_port:"",subdomain_host:"",parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;fetch("/api/serverinfo",{credentials:"include"}).then(function(e){return e.json()}).then(function(i){t.vhost_https_port=i.vhost_https_port,t.subdomain_host=i.subdomain_host,null!=t.vhost_https_port&&0!=t.vhost_https_port&&(0==e?fetch("/api/proxy/https",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/proxy/https/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new HttpsProxy(s,t.vhost_https_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(t.parentCurrentpage=e,fetch("/api/proxy/https/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new HttpsProxy(s,t.vhost_https_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})))})}},components:{"my-traffic-chart":l.a,pagination:a.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(58),a=i.n(r),s=i(40),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;0==e?fetch("/api/proxy/tcp",{credentials:"include"}).then(function(e){return 
e.json()}).then(function(e){t.parentCurrentpage=1,t.parentTotalPage=e.total_page,fetch("/api/proxy/tcp/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.d(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(this.parentCurrentpage=e,fetch("/api/proxy/tcp/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.d(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}}))}},components:{"my-traffic-chart":a.a,pagination:l.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(58),a=i.n(r),s=i(40),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;0==e?fetch("/api/proxy/udp",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentCurrentpage=1,t.parentTotalPage=e.total_page,fetch("/api/proxy/udp/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.c(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(this.parentCurrentpage=e,fetch("/api/proxy/udp/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.c(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}}))}},components:{"my-traffic-chart":a.a,pagination:l.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(110);t.default={props:["proxy_name"],created:function(){this.fetchData()},methods:{fetchData:function(){var e=this,t="/api/proxy/traffic/"+this.proxy_name;fetch(t,{credentials:"include"}).then(function(e){return e.json()}).then(function(t){i.i(n.a)(e.proxy_name,t.traffic_in,t.traffic_out)}).catch(function(t){e.$message({showClose:!0,message:"Get server info from frps failed!"+t,type:"warning"})})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={props:{totalPage:{type:Number,default:1,required:!0,validator:function(e){return e>=0}},currentPage:{type:Number,default:1,validator:function(e){return e>=0}},changeCallback:{type:Function,default:function(e){console.log("默认回调,显示页码:"+e)}}},data:function(){return{myCurrentPage:1,isPageNumberError:!1}},computed:{currentPage:function(){return this.myCurrentPage}},methods:{turnToPage:function(e){var t=this,e=parseInt(e);if(!e||e>t.totalPage||e<1)return console.log("页码输入有误!"),t.isPageNumberError=!0,!1;t.isPageNumberError=!1,t.myCurrentPage=e,t.changeCallback(e)}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var 
n=i(17),o=i(76),r=i.n(o),a=i(195),s=(i.n(a),i(196)),l=(i.n(s),i(197)),c=i.n(l),u=i(194),h=i(198);i.n(h);n.default.use(r.a),n.default.config.productionTip=!1,new n.default({el:"#app",router:u.a,template:"",components:{App:c.a}})},,function(e,t,i){"use strict";function n(e){var t=e.length;if(t%4>0)throw new Error("Invalid string. Length must be a multiple of 4");return"="===e[t-2]?2:"="===e[t-1]?1:0}function o(e){return 3*e.length/4-n(e)}function r(e){var t,i,o,r,a,s=e.length;r=n(e),a=new h(3*s/4-r),i=r>0?s-4:s;var l=0;for(t=0;t>16&255,a[l++]=o>>8&255,a[l++]=255&o;return 2===r?(o=u[e.charCodeAt(t)]<<2|u[e.charCodeAt(t+1)]>>4,a[l++]=255&o):1===r&&(o=u[e.charCodeAt(t)]<<10|u[e.charCodeAt(t+1)]<<4|u[e.charCodeAt(t+2)]>>2,a[l++]=o>>8&255,a[l++]=255&o),a}function a(e){return c[e>>18&63]+c[e>>12&63]+c[e>>6&63]+c[63&e]}function s(e,t,i){for(var n,o=[],r=t;rl?l:a+16383));return 1===n?(t=e[i-1],o+=c[t>>2],o+=c[t<<4&63],o+="=="):2===n&&(t=(e[i-2]<<8)+e[i-1],o+=c[t>>10],o+=c[t>>4&63],o+=c[t<<2&63],o+="="),r.push(o),r.join("")}t.byteLength=o,t.toByteArray=r,t.fromByteArray=l;for(var c=[],u=[],h="undefined"!=typeof Uint8Array?Uint8Array:Array,d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",p=0,f=d.length;p=n())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+n().toString(16)+" bytes");return 0|e}function g(e){return+e!=e&&(e=0),r.alloc(+e)}function m(e,t){if(r.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var i=e.length;if(0===i)return 0;for(var n=!1;;)switch(t){case"ascii":case"latin1":case"binary":return i;case"utf8":case"utf-8":case void 0:return j(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*i;case"hex":return i>>>1;case"base64":return Y(e).length;default:if(n)return j(e).length;t=(""+t).toLowerCase(),n=!0}}function v(e,t,i){var n=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===i||i>this.length)&&(i=this.length),i<=0)return"";if(i>>>=0,t>>>=0,i<=t)return"";for(e||(e="utf8");;)switch(e){case"hex":return L(this,t,i);case"utf8":case"utf-8":return I(this,t,i);case"ascii":return P(this,t,i);case"latin1":case"binary":return D(this,t,i);case"base64":return T(this,t,i);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return z(this,t,i);default:if(n)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),n=!0}}function x(e,t,i){var n=e[t];e[t]=e[i],e[i]=n}function y(e,t,i,n,o){if(0===e.length)return-1;if("string"==typeof i?(n=i,i=0):i>2147483647?i=2147483647:i<-2147483648&&(i=-2147483648),i=+i,isNaN(i)&&(i=o?0:e.length-1),i<0&&(i=e.length+i),i>=e.length){if(o)return-1;i=e.length-1}else if(i<0){if(!o)return-1;i=0}if("string"==typeof t&&(t=r.from(t,n)),r.isBuffer(t))return 0===t.length?-1:_(e,t,i,n,o);if("number"==typeof t)return t&=255,r.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,i):Uint8Array.prototype.lastIndexOf.call(e,t,i):_(e,[t],i,n,o);throw new TypeError("val must be string, number or Buffer")}function _(e,t,i,n,o){function r(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}var a=1,s=e.length,l=t.length;if(void 0!==n&&("ucs2"===(n=String(n).toLowerCase())||"ucs-2"===n||"utf16le"===n||"utf-16le"===n)){if(e.length<2||t.length<2)return-1;a=2,s/=2,l/=2,i/=2}var c;if(o){var u=-1;for(c=i;cs&&(i=s-l),c=i;c>=0;c--){for(var h=!0,d=0;do&&(n=o):n=o;var r=t.length;if(r%2!=0)throw new 
TypeError("Invalid hex string");n>r/2&&(n=r/2);for(var a=0;a239?4:r>223?3:r>191?2:1;if(o+s<=i){var l,c,u,h;switch(s){case 1:r<128&&(a=r);break;case 2:l=e[o+1],128==(192&l)&&(h=(31&r)<<6|63&l)>127&&(a=h);break;case 3:l=e[o+1],c=e[o+2],128==(192&l)&&128==(192&c)&&(h=(15&r)<<12|(63&l)<<6|63&c)>2047&&(h<55296||h>57343)&&(a=h);break;case 4:l=e[o+1],c=e[o+2],u=e[o+3],128==(192&l)&&128==(192&c)&&128==(192&u)&&(h=(15&r)<<18|(63&l)<<12|(63&c)<<6|63&u)>65535&&h<1114112&&(a=h)}}null===a?(a=65533,s=1):a>65535&&(a-=65536,n.push(a>>>10&1023|55296),a=56320|1023&a),n.push(a),o+=s}return C(n)}function C(e){var t=e.length;if(t<=$)return String.fromCharCode.apply(String,e);for(var i="",n=0;nn)&&(i=n);for(var o="",r=t;ri)throw new RangeError("Trying to access beyond buffer length")}function O(e,t,i,n,o,a){if(!r.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||te.length)throw new RangeError("Index out of range")}function R(e,t,i,n){t<0&&(t=65535+t+1);for(var o=0,r=Math.min(e.length-i,2);o>>8*(n?o:1-o)}function N(e,t,i,n){t<0&&(t=4294967295+t+1);for(var o=0,r=Math.min(e.length-i,4);o>>8*(n?o:3-o)&255}function B(e,t,i,n,o,r){if(i+n>e.length)throw new RangeError("Index out of range");if(i<0)throw new RangeError("Index out of range")}function V(e,t,i,n,o){return o||B(e,t,i,4,3.4028234663852886e38,-3.4028234663852886e38),J.write(e,t,i,n,23,4),i+4}function G(e,t,i,n,o){return o||B(e,t,i,8,1.7976931348623157e308,-1.7976931348623157e308),J.write(e,t,i,n,52,8),i+8}function H(e){if(e=F(e).replace(ee,""),e.length<2)return"";for(;e.length%4!=0;)e+="=";return e}function F(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}function W(e){return e<16?"0"+e.toString(16):e.toString(16)}function j(e,t){t=t||1/0;for(var i,n=e.length,o=null,r=[],a=0;a55295&&i<57344){if(!o){if(i>56319){(t-=3)>-1&&r.push(239,191,189);continue}if(a+1===n){(t-=3)>-1&&r.push(239,191,189);continue}o=i;continue}if(i<56320){(t-=3)>-1&&r.push(239,191,189),o=i;continue}i=65536+(o-55296<<10|i-56320)}else o&&(t-=3)>-1&&r.push(239,191,189);if(o=null,i<128){if((t-=1)<0)break;r.push(i)}else if(i<2048){if((t-=2)<0)break;r.push(i>>6|192,63&i|128)}else if(i<65536){if((t-=3)<0)break;r.push(i>>12|224,i>>6&63|128,63&i|128)}else{if(!(i<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;r.push(i>>18|240,i>>12&63|128,i>>6&63|128,63&i|128)}}return r}function U(e){for(var t=[],i=0;i>8,o=i%256,r.push(o),r.push(n);return r}function Y(e){return Q.toByteArray(H(e))}function q(e,t,i,n){for(var o=0;o=t.length||o>=e.length);++o)t[o+i]=e[o];return o}function X(e){return e!==e}/*! 
+var o=i(183),r=i(13),a=i(0),s=i(548),l=i(551),c=i(552),u=i(559),h=!r.canvasSupported,d={canvas:i(550)},p={},f={};f.version="3.6.0",f.init=function(e,t){var i=new g(o(),e,t);return p[i.id]=i,i},f.dispose=function(e){if(e)e.dispose();else{for(var t in p)p.hasOwnProperty(t)&&p[t].dispose();p={}}return f},f.getInstance=function(e){return p[e]},f.registerPainter=function(e,t){d[e]=t};var g=function(e,t,i){i=i||{},this.dom=t,this.id=e;var n=this,o=new l,p=i.renderer;if(h){if(!d.vml)throw new Error("You need to require 'zrender/vml/vml' to support IE8");p="vml"}else p&&d[p]||(p="canvas");var f=new d[p](t,o,i);this.storage=o,this.painter=f;var g=r.node?null:new u(f.getViewportRoot());this.handler=new s(o,f,g,f.root),this.animation=new c({stage:{update:a.bind(this.flush,this)}}),this.animation.start(),this._needsRefresh;var m=o.delFromStorage,v=o.addToStorage;o.delFromStorage=function(e){m.call(o,e),e&&e.removeSelfFromZr(n)},o.addToStorage=function(e){v.call(o,e),e.addSelfToZr(n)}};g.prototype={constructor:g,getId:function(){return this.id},add:function(e){this.storage.addRoot(e),this._needsRefresh=!0},remove:function(e){this.storage.delRoot(e),this._needsRefresh=!0},configLayer:function(e,t){this.painter.configLayer(e,t),this._needsRefresh=!0},refreshImmediately:function(){this._needsRefresh=!1,this.painter.refresh(),this._needsRefresh=!1},refresh:function(){this._needsRefresh=!0},flush:function(){this._needsRefresh&&this.refreshImmediately(),this._needsRefreshHover&&this.refreshHoverImmediately()},addHover:function(e,t){this.painter.addHover&&(this.painter.addHover(e,t),this.refreshHover())},removeHover:function(e){this.painter.removeHover&&(this.painter.removeHover(e),this.refreshHover())},clearHover:function(){this.painter.clearHover&&(this.painter.clearHover(),this.refreshHover())},refreshHover:function(){this._needsRefreshHover=!0},refreshHoverImmediately:function(){this._needsRefreshHover=!1,this.painter.refreshHover&&this.painter.refreshHover()},resize:function(e){e=e||{},this.painter.resize(e.width,e.height),this.handler.resize()},clearAnimation:function(){this.animation.clear()},getWidth:function(){return this.painter.getWidth()},getHeight:function(){return this.painter.getHeight()},pathToImage:function(e,t){return this.painter.pathToImage(e,t)},setCursorStyle:function(e){this.handler.setCursorStyle(e)},findHover:function(e,t){return this.handler.findHover(e,t)},on:function(e,t,i){this.handler.on(e,t,i)},off:function(e,t){this.handler.off(e,t)},trigger:function(e,t){this.handler.trigger(e,t)},clear:function(){this.storage.delRoot(),this.painter.clear()},dispose:function(){this.animation.stop(),this.clear(),this.storage.dispose(),this.painter.dispose(),this.handler.dispose(),this.animation=this.storage=this.painter=this.handler=null,n(this.id)}},e.exports=f},function(e,t,i){"use strict";var n=i(17),o=i(535),r=i(517),a=i.n(r),s=i(521),l=i.n(s),c=i(522),u=i.n(c),h=i(518),d=i.n(h),p=i(519),f=i.n(p),g=i(520),m=i.n(g),v=i(516),x=i.n(v),y=i(515),_=i.n(y),b=i(523),w=i.n(b);n.default.use(o.a),t.a=new 
o.a({routes:[{path:"/",name:"Overview",component:a.a},{path:"/client/offline",name:"OfflineClient",component:_.a},{path:"/client/online",name:"OnlineClient",component:x.a},{path:"/proxies/tcp",name:"ProxiesTcp",component:l.a},{path:"/proxies/udp",name:"ProxiesUdp",component:u.a},{path:"/proxies/ftp",name:"ProxiesFtp",component:d.a},{path:"/proxies/http",name:"ProxiesHttp",component:f.a},{path:"/proxies/https",name:"ProxiesHttps",component:m.a},{path:"/search",name:"Search",component:w.a}]})},function(e,t,i){var n=i(236);"string"==typeof n&&(n=[[e.i,n,""]]);i(172)(n,{});n.locals&&(e.exports=n.locals)},function(e,t,i){var n=i(237);"string"==typeof n&&(n=[[e.i,n,""]]);i(172)(n,{});n.locals&&(e.exports=n.locals)},function(e,t,i){i(539);var n=i(20)(i(221),i(527),null,null);e.exports=n.exports},function(e,t){!function(e){"use strict";function t(e){if("string"!=typeof e&&(e=String(e)),/[^a-z0-9\-#$%&'*+.\^_`|~]/i.test(e))throw new TypeError("Invalid character in header field name");return e.toLowerCase()}function i(e){return"string"!=typeof e&&(e=String(e)),e}function n(e){var t={next:function(){var t=e.shift();return{done:void 0===t,value:t}}};return v.iterable&&(t[Symbol.iterator]=function(){return t}),t}function o(e){this.map={},e instanceof o?e.forEach(function(e,t){this.append(t,e)},this):Array.isArray(e)?e.forEach(function(e){this.append(e[0],e[1])},this):e&&Object.getOwnPropertyNames(e).forEach(function(t){this.append(t,e[t])},this)}function r(e){if(e.bodyUsed)return Promise.reject(new TypeError("Already read"));e.bodyUsed=!0}function a(e){return new Promise(function(t,i){e.onload=function(){t(e.result)},e.onerror=function(){i(e.error)}})}function s(e){var t=new FileReader,i=a(t);return t.readAsArrayBuffer(e),i}function l(e){var t=new FileReader,i=a(t);return t.readAsText(e),i}function c(e){for(var t=new Uint8Array(e),i=new Array(t.length),n=0;n-1?t:e}function p(e,t){t=t||{};var i=t.body;if(e instanceof p){if(e.bodyUsed)throw new TypeError("Already read");this.url=e.url,this.credentials=e.credentials,t.headers||(this.headers=new o(e.headers)),this.method=e.method,this.mode=e.mode,i||null==e._bodyInit||(i=e._bodyInit,e.bodyUsed=!0)}else this.url=String(e);if(this.credentials=t.credentials||this.credentials||"omit",!t.headers&&this.headers||(this.headers=new o(t.headers)),this.method=d(t.method||this.method||"GET"),this.mode=t.mode||this.mode||null,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&i)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(i)}function f(e){var t=new FormData;return e.trim().split("&").forEach(function(e){if(e){var i=e.split("="),n=i.shift().replace(/\+/g," "),o=i.join("=").replace(/\+/g," ");t.append(decodeURIComponent(n),decodeURIComponent(o))}}),t}function g(e){var t=new o;return e.split(/\r?\n/).forEach(function(e){var i=e.split(":"),n=i.shift().trim();if(n){var o=i.join(":").trim();t.append(n,o)}}),t}function m(e,t){t||(t={}),this.type="default",this.status="status"in t?t.status:200,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in t?t.statusText:"OK",this.headers=new o(t.headers),this.url=t.url||"",this._initBody(e)}if(!e.fetch){var v={searchParams:"URLSearchParams"in e,iterable:"Symbol"in e&&"iterator"in Symbol,blob:"FileReader"in e&&"Blob"in e&&function(){try{return new Blob,!0}catch(e){return!1}}(),formData:"FormData"in e,arrayBuffer:"ArrayBuffer"in e};if(v.arrayBuffer)var x=["[object Int8Array]","[object Uint8Array]","[object Uint8ClampedArray]","[object Int16Array]","[object 
Uint16Array]","[object Int32Array]","[object Uint32Array]","[object Float32Array]","[object Float64Array]"],y=function(e){return e&&DataView.prototype.isPrototypeOf(e)},_=ArrayBuffer.isView||function(e){return e&&x.indexOf(Object.prototype.toString.call(e))>-1};o.prototype.append=function(e,n){e=t(e),n=i(n);var o=this.map[e];this.map[e]=o?o+","+n:n},o.prototype.delete=function(e){delete this.map[t(e)]},o.prototype.get=function(e){return e=t(e),this.has(e)?this.map[e]:null},o.prototype.has=function(e){return this.map.hasOwnProperty(t(e))},o.prototype.set=function(e,n){this.map[t(e)]=i(n)},o.prototype.forEach=function(e,t){for(var i in this.map)this.map.hasOwnProperty(i)&&e.call(t,this.map[i],i,this)},o.prototype.keys=function(){var e=[];return this.forEach(function(t,i){e.push(i)}),n(e)},o.prototype.values=function(){var e=[];return this.forEach(function(t){e.push(t)}),n(e)},o.prototype.entries=function(){var e=[];return this.forEach(function(t,i){e.push([i,t])}),n(e)},v.iterable&&(o.prototype[Symbol.iterator]=o.prototype.entries);var b=["DELETE","GET","HEAD","OPTIONS","POST","PUT"];p.prototype.clone=function(){return new p(this,{body:this._bodyInit})},h.call(p.prototype),h.call(m.prototype),m.prototype.clone=function(){return new m(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new o(this.headers),url:this.url})},m.error=function(){var e=new m(null,{status:0,statusText:""});return e.type="error",e};var w=[301,302,303,307,308];m.redirect=function(e,t){if(-1===w.indexOf(t))throw new RangeError("Invalid status code");return new m(null,{status:t,headers:{location:e}})},e.Headers=o,e.Request=p,e.Response=m,e.fetch=function(e,t){return new Promise(function(i,n){var o=new p(e,t),r=new XMLHttpRequest;r.onload=function(){var e={status:r.status,statusText:r.statusText,headers:g(r.getAllResponseHeaders()||"")};e.url="responseURL"in r?r.responseURL:e.headers.get("X-Request-URL");var t="response"in r?r.response:r.responseText;i(new m(t,e))},r.onerror=function(){n(new TypeError("Network request failed"))},r.ontimeout=function(){n(new TypeError("Network request failed"))},r.open(o.method,o.url,!0),"include"===o.credentials&&(r.withCredentials=!0),"responseType"in r&&v.blob&&(r.responseType="blob"),o.headers.forEach(function(e,t){r.setRequestHeader(t,e)}),r.send(void 0===o._bodyInit?null:o._bodyInit)})},e.fetch.polyfill=!0}}("undefined"!=typeof self?self:this)},,,,,,,,,,,,,,,,,,,,,,,function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={methods:{handleSelect:function(e,t){""==e&&window.open("https://github.com/liudf0716/xfrps")}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=(i.n(n),i(40)),r=i.n(o),a=i(111);t.default={data:function(){return{clients:null,parentTotalPage:1,parentCurrentPage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},components:{pagination:r.a},methods:{fetchData:function(e){var t=this;0==e?fetch("/api/client/offline",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/client/offline/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw 
o}}})}):fetch("/api/client/offline/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=(i.n(n),i(40)),r=i.n(o),a=i(111);t.default={data:function(){return{clients:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},components:{pagination:r.a},methods:{fetchData:function(e){var t=this;0==e?fetch("/api/client/online",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/client/online/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}):fetch("/api/client/online/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.clients=new Array;var i=!0,n=!1,o=void 0;try{for(var r,s=e.clients[Symbol.iterator]();!(i=(r=s.next()).done);i=!0){var l=r.value;t.clients.push(new a.a(l))}}catch(e){n=!0,o=e}finally{try{!i&&s.return&&s.return()}finally{if(n)throw o}}})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(110);t.default={data:function(){return{version:"",vhost_http_port:"",vhost_https_port:"",auth_timeout:"",subdomain_host:"",max_pool_count:"",heart_beat_timeout:"",client_counts:"",cur_conns:"",proxy_counts:""}},created:function(){this.fetchData()},watch:{$route:"fetchData"},methods:{fetchData:function(){var e=this;fetch("/api/serverinfo",{credentials:"include"}).then(function(e){return e.json()}).then(function(t){e.version=t.version,e.vhost_http_port=t.vhost_http_port,0==e.vhost_http_port&&(e.vhost_http_port="disable"),e.vhost_https_port=t.vhost_https_port,0==e.vhost_https_port&&(e.vhost_https_port="disable"),e.auth_timeout=t.auth_timeout,e.subdomain_host=t.subdomain_host,e.max_pool_count=t.max_pool_count,e.heart_beat_timeout=t.heart_beat_timeout,e.client_counts=t.client_counts,e.cur_conns=t.cur_conns,e.proxy_counts=0,null!=t.proxy_type_count&&(null!=t.proxy_type_count.tcp&&(e.proxy_counts+=t.proxy_type_count.tcp),null!=t.proxy_type_count.udp&&(e.proxy_counts+=t.proxy_type_count.udp),null!=t.proxy_type_count.ftp&&(e.proxy_counts+=t.proxy_type_count.ftp),null!=t.proxy_type_count.http&&(e.proxy_counts+=t.proxy_type_count.http),null!=t.proxy_type_count.https&&(e.proxy_counts+=t.proxy_type_count.https)),i.i(n.b)("traffic",t.total_traffic_in,t.total_traffic_out),i.i(n.c)("proxies",t)}).catch(function(t){e.$message({showClose:!0,message:"Get server info from frps failed!",type:"warning"})})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(58),a=i.n(r),s=i(40),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var 
t=this;0==e?fetch("/api/proxy/ftp",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentCurrentpage=1,t.parentTotalPage=e.total_page,fetch("/api/proxy/ftp/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.b(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(this.parentCurrentpage=e,fetch("/api/proxy/ftp/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.b(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}}))}},components:{"my-traffic-chart":a.a,pagination:l.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(40),a=i.n(r),s=i(58),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,vhost_http_port:"",subdomain_host:"",parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;fetch("/api/serverinfo",{credentials:"include"}).then(function(e){return e.json()}).then(function(i){t.vhost_http_port=i.vhost_http_port,t.subdomain_host=i.subdomain_host,null!=t.vhost_http_port&&0!=t.vhost_http_port&&(0==e?fetch("/api/proxy/http",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/proxy/http/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.a(s,t.vhost_http_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(t.parentCurrentpage=e,fetch("/api/proxy/http/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.a(s,t.vhost_http_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})))})}},components:{"my-traffic-chart":l.a,pagination:a.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(40),a=i.n(r),s=i(58),l=i.n(s);i(50);t.default={data:function(){return{proxies:null,vhost_https_port:"",subdomain_host:"",parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;fetch("/api/serverinfo",{credentials:"include"}).then(function(e){return e.json()}).then(function(i){t.vhost_https_port=i.vhost_https_port,t.subdomain_host=i.subdomain_host,null!=t.vhost_https_port&&0!=t.vhost_https_port&&(0==e?fetch("/api/proxy/https",{credentials:"include"}).then(function(e){return 
e.json()}).then(function(e){t.parentTotalPage=e.total_page,t.parentCurrentPage=1,fetch("/api/proxy/https/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new HttpsProxy(s,t.vhost_https_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(t.parentCurrentpage=e,fetch("/api/proxy/https/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new HttpsProxy(s,t.vhost_https_port,t.subdomain_host))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})))})}},components:{"my-traffic-chart":l.a,pagination:a.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(58),a=i.n(r),s=i(40),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;0==e?fetch("/api/proxy/tcp",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentCurrentpage=1,t.parentTotalPage=e.total_page,fetch("/api/proxy/tcp/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.d(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(this.parentCurrentpage=e,fetch("/api/proxy/tcp/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.d(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}}))}},components:{"my-traffic-chart":a.a,pagination:l.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(34),o=i.n(n),r=i(58),a=i.n(r),s=i(40),l=i.n(s),c=i(50);t.default={data:function(){return{proxies:null,parentTotalPage:1,parentCurrentpage:1}},created:function(){this.fetchData(0)},watch:{$route:"fetchData"},methods:{formatTrafficIn:function(e,t){return o.a.fileSize(e.traffic_in)},formatTrafficOut:function(e,t){return o.a.fileSize(e.traffic_out)},fetchData:function(e){var t=this;0==e?fetch("/api/proxy/udp",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.parentCurrentpage=1,t.parentTotalPage=e.total_page,fetch("/api/proxy/udp/1",{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var s=r.value;t.proxies.push(new c.c(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}})}):(this.parentCurrentpage=e,fetch("/api/proxy/udp/"+e,{credentials:"include"}).then(function(e){return e.json()}).then(function(e){t.proxies=new Array;var i=!0,n=!1,o=void 0;try{for(var r,a=e.proxies[Symbol.iterator]();!(i=(r=a.next()).done);i=!0){var 
s=r.value;t.proxies.push(new c.c(s))}}catch(e){n=!0,o=e}finally{try{!i&&a.return&&a.return()}finally{if(n)throw o}}}))}},components:{"my-traffic-chart":a.a,pagination:l.a}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(110);t.default={props:["proxy_name"],created:function(){this.fetchData()},methods:{fetchData:function(){var e=this,t="/api/proxy/traffic/"+this.proxy_name;fetch(t,{credentials:"include"}).then(function(e){return e.json()}).then(function(t){i.i(n.a)(e.proxy_name,t.traffic_in,t.traffic_out)}).catch(function(t){e.$message({showClose:!0,message:"Get server info from frps failed!"+t,type:"warning"})})}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default={props:{totalPage:{type:Number,default:1,required:!0,validator:function(e){return e>=0}},currentPage:{type:Number,default:1,validator:function(e){return e>=0}},changeCallback:{type:Function,default:function(e){console.log("默认回调,显示页码:"+e)}}},data:function(){return{myCurrentPage:1,isPageNumberError:!1}},computed:{currentPage:function(){return this.myCurrentPage}},methods:{turnToPage:function(e){var t=this,e=parseInt(e);if(!e||e>t.totalPage||e<1)return console.log("页码输入有误!"),t.isPageNumberError=!0,!1;t.isPageNumberError=!1,t.myCurrentPage=e,t.changeCallback(e)}}}},function(e,t,i){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=i(17),o=i(76),r=i.n(o),a=i(195),s=(i.n(a),i(196)),l=(i.n(s),i(197)),c=i.n(l),u=i(194),h=i(198);i.n(h);n.default.use(r.a),n.default.config.productionTip=!1,new n.default({el:"#app",router:u.a,template:"",components:{App:c.a}})},,function(e,t,i){"use strict";function n(e){var t=e.length;if(t%4>0)throw new Error("Invalid string. Length must be a multiple of 4");return"="===e[t-2]?2:"="===e[t-1]?1:0}function o(e){return 3*e.length/4-n(e)}function r(e){var t,i,o,r,a,s=e.length;r=n(e),a=new h(3*s/4-r),i=r>0?s-4:s;var l=0;for(t=0;t>16&255,a[l++]=o>>8&255,a[l++]=255&o;return 2===r?(o=u[e.charCodeAt(t)]<<2|u[e.charCodeAt(t+1)]>>4,a[l++]=255&o):1===r&&(o=u[e.charCodeAt(t)]<<10|u[e.charCodeAt(t+1)]<<4|u[e.charCodeAt(t+2)]>>2,a[l++]=o>>8&255,a[l++]=255&o),a}function a(e){return c[e>>18&63]+c[e>>12&63]+c[e>>6&63]+c[63&e]}function s(e,t,i){for(var n,o=[],r=t;rl?l:a+16383));return 1===n?(t=e[i-1],o+=c[t>>2],o+=c[t<<4&63],o+="=="):2===n&&(t=(e[i-2]<<8)+e[i-1],o+=c[t>>10],o+=c[t>>4&63],o+=c[t<<2&63],o+="="),r.push(o),r.join("")}t.byteLength=o,t.toByteArray=r,t.fromByteArray=l;for(var c=[],u=[],h="undefined"!=typeof Uint8Array?Uint8Array:Array,d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",p=0,f=d.length;p=n())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+n().toString(16)+" bytes");return 0|e}function g(e){return+e!=e&&(e=0),r.alloc(+e)}function m(e,t){if(r.isBuffer(e))return e.length;if("undefined"!=typeof ArrayBuffer&&"function"==typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!=typeof e&&(e=""+e);var i=e.length;if(0===i)return 0;for(var n=!1;;)switch(t){case"ascii":case"latin1":case"binary":return i;case"utf8":case"utf-8":case void 0:return j(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*i;case"hex":return i>>>1;case"base64":return Y(e).length;default:if(n)return j(e).length;t=(""+t).toLowerCase(),n=!0}}function v(e,t,i){var n=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 
0===i||i>this.length)&&(i=this.length),i<=0)return"";if(i>>>=0,t>>>=0,i<=t)return"";for(e||(e="utf8");;)switch(e){case"hex":return L(this,t,i);case"utf8":case"utf-8":return I(this,t,i);case"ascii":return P(this,t,i);case"latin1":case"binary":return D(this,t,i);case"base64":return T(this,t,i);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return z(this,t,i);default:if(n)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),n=!0}}function x(e,t,i){var n=e[t];e[t]=e[i],e[i]=n}function y(e,t,i,n,o){if(0===e.length)return-1;if("string"==typeof i?(n=i,i=0):i>2147483647?i=2147483647:i<-2147483648&&(i=-2147483648),i=+i,isNaN(i)&&(i=o?0:e.length-1),i<0&&(i=e.length+i),i>=e.length){if(o)return-1;i=e.length-1}else if(i<0){if(!o)return-1;i=0}if("string"==typeof t&&(t=r.from(t,n)),r.isBuffer(t))return 0===t.length?-1:_(e,t,i,n,o);if("number"==typeof t)return t&=255,r.TYPED_ARRAY_SUPPORT&&"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,i):Uint8Array.prototype.lastIndexOf.call(e,t,i):_(e,[t],i,n,o);throw new TypeError("val must be string, number or Buffer")}function _(e,t,i,n,o){function r(e,t){return 1===a?e[t]:e.readUInt16BE(t*a)}var a=1,s=e.length,l=t.length;if(void 0!==n&&("ucs2"===(n=String(n).toLowerCase())||"ucs-2"===n||"utf16le"===n||"utf-16le"===n)){if(e.length<2||t.length<2)return-1;a=2,s/=2,l/=2,i/=2}var c;if(o){var u=-1;for(c=i;cs&&(i=s-l),c=i;c>=0;c--){for(var h=!0,d=0;do&&(n=o):n=o;var r=t.length;if(r%2!=0)throw new TypeError("Invalid hex string");n>r/2&&(n=r/2);for(var a=0;a239?4:r>223?3:r>191?2:1;if(o+s<=i){var l,c,u,h;switch(s){case 1:r<128&&(a=r);break;case 2:l=e[o+1],128==(192&l)&&(h=(31&r)<<6|63&l)>127&&(a=h);break;case 3:l=e[o+1],c=e[o+2],128==(192&l)&&128==(192&c)&&(h=(15&r)<<12|(63&l)<<6|63&c)>2047&&(h<55296||h>57343)&&(a=h);break;case 4:l=e[o+1],c=e[o+2],u=e[o+3],128==(192&l)&&128==(192&c)&&128==(192&u)&&(h=(15&r)<<18|(63&l)<<12|(63&c)<<6|63&u)>65535&&h<1114112&&(a=h)}}null===a?(a=65533,s=1):a>65535&&(a-=65536,n.push(a>>>10&1023|55296),a=56320|1023&a),n.push(a),o+=s}return C(n)}function C(e){var t=e.length;if(t<=$)return String.fromCharCode.apply(String,e);for(var i="",n=0;nn)&&(i=n);for(var o="",r=t;ri)throw new RangeError("Trying to access beyond buffer length")}function O(e,t,i,n,o,a){if(!r.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||te.length)throw new RangeError("Index out of range")}function R(e,t,i,n){t<0&&(t=65535+t+1);for(var o=0,r=Math.min(e.length-i,2);o>>8*(n?o:1-o)}function N(e,t,i,n){t<0&&(t=4294967295+t+1);for(var o=0,r=Math.min(e.length-i,4);o>>8*(n?o:3-o)&255}function B(e,t,i,n,o,r){if(i+n>e.length)throw new RangeError("Index out of range");if(i<0)throw new RangeError("Index out of range")}function V(e,t,i,n,o){return o||B(e,t,i,4,3.4028234663852886e38,-3.4028234663852886e38),J.write(e,t,i,n,23,4),i+4}function G(e,t,i,n,o){return o||B(e,t,i,8,1.7976931348623157e308,-1.7976931348623157e308),J.write(e,t,i,n,52,8),i+8}function H(e){if(e=F(e).replace(ee,""),e.length<2)return"";for(;e.length%4!=0;)e+="=";return e}function F(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}function W(e){return e<16?"0"+e.toString(16):e.toString(16)}function j(e,t){t=t||1/0;for(var i,n=e.length,o=null,r=[],a=0;a55295&&i<57344){if(!o){if(i>56319){(t-=3)>-1&&r.push(239,191,189);continue}if(a+1===n){(t-=3)>-1&&r.push(239,191,189);continue}o=i;continue}if(i<56320){(t-=3)>-1&&r.push(239,191,189),o=i;continue}i=65536+(o-55296<<10|i-56320)}else 
o&&(t-=3)>-1&&r.push(239,191,189);if(o=null,i<128){if((t-=1)<0)break;r.push(i)}else if(i<2048){if((t-=2)<0)break;r.push(i>>6|192,63&i|128)}else if(i<65536){if((t-=3)<0)break;r.push(i>>12|224,i>>6&63|128,63&i|128)}else{if(!(i<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;r.push(i>>18|240,i>>12&63|128,i>>6&63|128,63&i|128)}}return r}function U(e){for(var t=[],i=0;i>8,o=i%256,r.push(o),r.push(n);return r}function Y(e){return Q.toByteArray(H(e))}function q(e,t,i,n){for(var o=0;o=t.length||o>=e.length);++o)t[o+i]=e[o];return o}function X(e){return e!==e}/*! * The buffer module from node.js, for the browser. * * @author Feross Aboukhadijeh diff --git a/client/control.go b/client/control.go index bce6f4e..d0092d4 100644 --- a/client/control.go +++ b/client/control.go @@ -23,13 +23,13 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/models/msg" - "github.com/KunTengRom/xfrps/utils/crypto" - "github.com/KunTengRom/xfrps/utils/log" - "github.com/KunTengRom/xfrps/utils/net" - "github.com/KunTengRom/xfrps/utils/util" - "github.com/KunTengRom/xfrps/utils/version" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/models/msg" + "github.com/liudf0716/xfrps/utils/crypto" + "github.com/liudf0716/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/util" + "github.com/liudf0716/xfrps/utils/version" "github.com/xtaci/smux" ) diff --git a/client/proxy.go b/client/proxy.go index 0c6a63d..3bda2f4 100644 --- a/client/proxy.go +++ b/client/proxy.go @@ -23,14 +23,14 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/models/msg" - "github.com/KunTengRom/xfrps/models/plugin" - "github.com/KunTengRom/xfrps/models/proto/tcp" - "github.com/KunTengRom/xfrps/models/proto/udp" - "github.com/KunTengRom/xfrps/utils/errors" - "github.com/KunTengRom/xfrps/utils/log" - frpNet "github.com/KunTengRom/xfrps/utils/net" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/models/msg" + "github.com/liudf0716/xfrps/models/plugin" + "github.com/liudf0716/xfrps/models/proto/tcp" + "github.com/liudf0716/xfrps/models/proto/udp" + "github.com/liudf0716/xfrps/utils/errors" + "github.com/liudf0716/xfrps/utils/log" + frpNet "github.com/liudf0716/xfrps/utils/net" ) // Proxy defines how to work for different proxy type. 
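For reference, the minified dashboard bundle above polls the frps dashboard API (`/api/proxy/tcp`, `/api/proxy/tcp/<page>`, `/api/proxy/udp/<page>`, `/api/proxy/https/<page>`, `/api/proxy/traffic/<name>`) and reads `total_page`, `proxies`, `traffic_in`, and `traffic_out` from the JSON responses. A minimal Go sketch of the same call is below; the endpoint path and JSON field names are taken from the bundle, while the struct names and the dashboard address/port are illustrative assumptions, not part of this change.

```go
// Minimal sketch of querying the xfrps dashboard API used by the bundled
// frontend above. Endpoint path and JSON field names come from the minified
// JS; struct names and the dashboard address (7500) are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// tcpProxyPage mirrors the fields the dashboard JS reads from /api/proxy/tcp/<page>.
type tcpProxyPage struct {
	TotalPage int `json:"total_page"`
	Proxies   []struct {
		TrafficIn  int64 `json:"traffic_in"`
		TrafficOut int64 `json:"traffic_out"`
	} `json:"proxies"`
}

func main() {
	// Assumes the dashboard listens on localhost:7500 (dashboard_port in xfrps.ini).
	resp, err := http.Get("http://127.0.0.1:7500/api/proxy/tcp/1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var page tcpProxyPage
	if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
		panic(err)
	}
	fmt.Printf("total pages: %d, proxies on page 1: %d\n", page.TotalPage, len(page.Proxies))
}
```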
diff --git a/client/service.go b/client/service.go index 439f9eb..7507f73 100644 --- a/client/service.go +++ b/client/service.go @@ -14,7 +14,7 @@ package client -import "github.com/KunTengRom/xfrps/models/config" +import "github.com/liudf0716/xfrps/models/config" type Service struct { // manager control connection with server diff --git a/cmd/frpc/frpc b/cmd/frpc/frpc deleted file mode 100755 index 219d89f..0000000 Binary files a/cmd/frpc/frpc and /dev/null differ diff --git a/cmd/frps/frps b/cmd/frps/frps deleted file mode 100755 index 93f8b9b..0000000 Binary files a/cmd/frps/frps and /dev/null differ diff --git a/cmd/frpc/main.go b/cmd/xfrpc/main.go similarity index 94% rename from cmd/frpc/main.go rename to cmd/xfrpc/main.go index b62d08a..d36f267 100644 --- a/cmd/frpc/main.go +++ b/cmd/xfrpc/main.go @@ -23,10 +23,10 @@ import ( docopt "github.com/docopt/docopt-go" ini "github.com/vaughan0/go-ini" - "github.com/KunTengRom/xfrps/client" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/utils/log" - "github.com/KunTengRom/xfrps/utils/version" + "github.com/liudf0716/xfrps/client" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/version" ) var ( diff --git a/cmd/frps/main.go b/cmd/xfrps/main.go similarity index 94% rename from cmd/frps/main.go rename to cmd/xfrps/main.go index 810a02a..efec323 100644 --- a/cmd/frps/main.go +++ b/cmd/xfrps/main.go @@ -23,10 +23,10 @@ import ( docopt "github.com/docopt/docopt-go" ini "github.com/vaughan0/go-ini" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/server" - "github.com/KunTengRom/xfrps/utils/log" - "github.com/KunTengRom/xfrps/utils/version" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/server" + "github.com/liudf0716/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/version" ) var usage string = `frps is the server of frp diff --git a/doc/quick_start_en.md b/doc/quick_start_en.md index 7850226..36b1022 100644 --- a/doc/quick_start_en.md +++ b/doc/quick_start_en.md @@ -9,11 +9,11 @@ We will use two simple demo to demonstrate how to use frp. ### Download SourceCode -`go get github.com/KunTengRom/xfrps` is recommended, then the code will be copied to the directory `$GOPATH/src/github.com/KunTengRom/xfrps`. +`go get github.com/liudf0716/xfrps` is recommended, then the code will be copied to the directory `$GOPATH/src/github.com/liudf0716/xfrps`. -Or you can use `git clone https://github.com/KunTengRom/xfrps.git $GOPATH/src/github.com/KunTengRom/xfrps`. +Or you can use `git clone https://github.com/liudf0716/xfrps.git $GOPATH/src/github.com/liudf0716/xfrps`. -If you want to try it quickly, download the compiled program and configuration files from [https://github.com/KunTengRom/xfrps/releases](https://github.com/KunTengRom/xfrps/releases). +If you want to try it quickly, download the compiled program and configuration files from [https://github.com/liudf0716/xfrps/releases](https://github.com/liudf0716/xfrps/releases). ### Compile @@ -28,25 +28,25 @@ Enter the root directory and execute `make`, then wait until finished. ### Deploy -1. Move `./bin/frps` and `./conf/frps.ini` to any directory of **server B**. -2. Move `./bin/frpc` and `./conf/frpc.ini` to any directory of **server A**. +1. Move `./bin/xfrps` and `./conf/xfrps.ini` to any directory of **server B**. +2. Move `./bin/xfrpc` and `./conf/xfrpc.ini` to any directory of **server A**. 3. 
Modify all configuration files, details in next paragraph. -4. Execute `nohup ./frps &` or `nohup ./frps -c ./frps.ini &` in **server B**. -5. Execute `nohup ./frpc &` or `nohup ./frpc -c ./frpc.ini &` in **server A**. +4. Execute `nohup ./xfrps &` or `nohup ./xfrps -c ./xfrps.ini &` in **server B**. +5. Execute `nohup ./xfrpc &` or `nohup ./xfrpc -c ./xfrpc.ini &` in **server A**. 6. Use `ssh -oPort=6000 {user}@x.x.x.x` to test whether frp works (replace {user} with a real username on **server A**), or visit the custom domains in a browser. ## Tcp port forwarding ### Configuration files -#### frps.ini +#### xfrps.ini ```ini [common] bind_addr = 0.0.0.0 -# for accept connections from frpc +# for accepting connections from xfrpc bind_port = 7000 -log_file = ./frps.log +log_file = ./xfrps.log log_level = info # ssh is the custom name of proxy and there can be many proxies with unique name in one configure file @@ -57,23 +57,23 @@ bind_addr = 0.0.0.0 listen_port = 6000 ``` -#### frpc.ini +#### xfrpc.ini ```ini [common] -# server address of frps +# server address of xfrps server_addr = x.x.x.x server_port = 7000 -log_file = ./frpc.log +log_file = ./xfrpc.log log_level = info # for authentication auth_token = 123 -# ssh is proxy name same with configure in frps.ini +# ssh is the proxy name; it must match the one configured in xfrps.ini [ssh] # local port which need to be transferred local_port = 22 -# if use_encryption equals true, messages between frpc and frps will be encrypted, default is false +# if use_encryption equals true, messages between xfrpc and xfrps will be encrypted, default is false use_encryption = true ``` @@ -87,7 +87,7 @@ After that, you can visit your web pages in local server by custom domains. ### Configuration files -#### frps.ini +#### xfrps.ini ```ini [common] @@ -95,7 +95,7 @@ bind_addr = 0.0.0.0 bind_port = 7000 # if you want to support vhost, specify one port for http services vhost_http_port = 80 -log_file = ./frps.log +log_file = ./xfrps.log log_level = info [web01] @@ -110,17 +110,17 @@ auth_token = 123 custom_domains = web02.yourdomain.com ``` -#### frpc.ini +#### xfrpc.ini ```ini [common] server_addr = x.x.x.x server_port = 7000 -log_file = ./frpc.log +log_file = ./xfrpc.log log_level = info auth_token = 123 -# custom domains are set in frps.ini +# custom domains are set in xfrps.ini [web01] type = http local_ip = 127.0.0.1 diff --git a/doc/quick_start_zh.md b/doc/quick_start_zh.md index d02b36a..1cc0ac8 100644 --- a/doc/quick_start_zh.md +++ b/doc/quick_start_zh.md @@ -1,17 +1,17 @@ -# frp 使用文档 +# xfrp 使用文档 -相比于其他项目而言 frp 更易于部署和使用,这里我们用两个简单的示例来演示 frp 的使用过程。 +相比于其他项目而言 xfrp 更易于部署和使用,这里我们用两个简单的示例来演示 xfrp 的使用过程。 1. 如何通过一台拥有公网IP地址的**服务器B**,访问处于公司内部网络环境中的**服务器A**的**ssh**端口,**服务器B**的IP地址为 x.x.x.x(测试时替换为真实的IP地址)。 2.
如何利用一台拥有公网IP地址的**服务器B**,使通过 **web01.yourdomain.com** 可以访问内网环境中**服务器A**上**8000端口**的web服务,**web02.yourdomain.com** 可以访问**服务器A**上**8001端口**的web服务。 ### 下载源码 -推荐直接使用 `go get github.com/KunTengRom/xfrps` 下载源代码安装,执行命令后代码将会拷贝到 `$GOPATH/src/github.com/KunTengRom/xfrps` 目录下。 +推荐直接使用 `go get github.com/liudf0716/xfrps` 下载源代码安装,执行命令后代码将会拷贝到 `$GOPATH/src/github.com/liudf0716/xfrps` 目录下。 -或者可以使用 `git clone https://github.com/KunTengRom/xfrps.git $GOPATH/src/github.com/KunTengRom/xfrps` 拷贝到相应目录下。 +或者可以使用 `git clone https://github.com/liudf0716/xfrps.git $GOPATH/src/github.com/liudf0716/xfrps` 拷贝到相应目录下。 -如果您想快速进行测试,也可以根据您服务器的操作系统及架构直接下载编译好的程序及示例配置文件,[https://github.com/KunTengRom/xfrps/releases](https://github.com/KunTengRom/xfrps/releases)。 +如果您想快速进行测试,也可以根据您服务器的操作系统及架构直接下载编译好的程序及示例配置文件,[https://github.com/liudf0716/xfrps/releases](https://github.com/liudf0716/xfrps/releases)。 ### 编译 @@ -26,30 +26,30 @@ ### 部署 -1. 将 ./bin/frps 和 ./conf/frps.ini 拷贝至**服务器B**任意目录。 -2. 将 ./bin/frpc 和 ./conf/frpc.ini 拷贝至**服务器A**任意目录。 +1. 将 ./bin/xfrps 和 ./conf/xfrps.ini 拷贝至**服务器B**任意目录。 +2. 将 ./bin/xfrpc 和 ./conf/xfrpc.ini 拷贝至**服务器A**任意目录。 3. 根据要实现的功能修改两边的配置文件,详细内容见后续章节说明。 -4. 在服务器B执行 `nohup ./frps &` 或者 `nohup ./frps -c ./frps.ini &`。 -5. 在服务器A执行 `nohup ./frpc &` 或者 `nohup ./frpc -c ./frpc.ini &`。 +4. 在服务器B执行 `nohup ./xfrps &` 或者 `nohup ./xfrps -c ./xfrps.ini &`。 +5. 在服务器A执行 `nohup ./xfrpc &` 或者 `nohup ./xfrpc -c ./xfrpc.ini &`。 6. 通过 `ssh -oPort=6000 {user}@x.x.x.x` 测试是否能够成功连接**服务器A**({user}替换为**服务器A**上存在的真实用户),或通过浏览器访问自定义域名验证 http 服务是否转发成功。 ## tcp 端口转发 -转发 tcp 端口需要按照需求修改 frps 和 frpc 的配置文件。 +转发 tcp 端口需要按照需求修改 xfrps 和 xfrpc 的配置文件。 ### 配置文件 -#### frps.ini +#### xfrps.ini ```ini [common] bind_addr = 0.0.0.0 -# 用于接收 frpc 连接的端口 +# 用于接收 xfrpc 连接的端口 bind_port = 7000 -log_file = ./frps.log +log_file = ./xfrps.log log_level = info -# ssh 为代理的自定义名称,可以有多个,不能重复,和frpc中名称对应 +# ssh 为代理的自定义名称,可以有多个,不能重复,和xfrpc中名称对应 [ssh] auth_token = 123 bind_addr = 0.0.0.0 @@ -57,29 +57,29 @@ bind_addr = 0.0.0.0 listen_port = 6000 ``` -#### frpc.ini +#### xfrpc.ini ```ini [common] -# frps 所在服务器绑定的IP地址 +# xfrps 所在服务器绑定的IP地址 server_addr = x.x.x.x server_port = 7000 -log_file = ./frpc.log +log_file = ./xfrpc.log log_level = info # 用于身份验证 auth_token = 123 -# ssh 需要和 frps.ini 中配置一致 +# ssh 需要和 xfrps.ini 中配置一致 [ssh] # 需要转发的本地端口 local_port = 22 -# 启用加密,frpc与frps之间通信加密,默认为 false +# 启用加密,xfrpc与xfrps之间通信加密,默认为 false use_encryption = true ``` ## http 端口转发,自定义域名绑定 -如果只需要一对一的转发,例如**服务器B**的**80端口**转发**服务器A**的**8000端口**,则只需要配置 [tcp 端口转发](/doc/quick_start_zh.md#tcp-端口转发) 即可,如果需要使**服务器B**的**80端口**可以转发至**多个**web服务端口,则需要指定代理的类型为 http,并且在 frps 的配置文件中配置用于提供 http 转发服务的端口。 +如果只需要一对一的转发,例如**服务器B**的**80端口**转发**服务器A**的**8000端口**,则只需要配置 [tcp 端口转发](/doc/quick_start_zh.md#tcp-端口转发) 即可,如果需要使**服务器B**的**80端口**可以转发至**多个**web服务端口,则需要指定代理的类型为 http,并且在 xfrps 的配置文件中配置用于提供 http 转发服务的端口。 按照如下的内容修改配置文件后,需要将自定义域名的 **A 记录**解析到 [server_addr],如果 [server_addr] 是域名也可以将自定义域名的 **CNAME 记录**解析到 [server_addr]。 @@ -87,7 +87,7 @@ use_encryption = true ### 配置文件 -#### frps.ini +#### xfrps.ini ```ini [common] @@ -95,7 +95,7 @@ bind_addr = 0.0.0.0 bind_port = 7000 # 如果需要支持http类型的代理则需要指定一个端口 vhost_http_port = 80 -log_file = ./frps.log +log_file = ./xfrps.log log_level = info [web01] @@ -111,18 +111,18 @@ auth_token = 123 custom_domains = web02.yourdomain.com ``` -#### frpc.ini +#### xfrpc.ini ```ini [common] server_addr = x.x.x.x server_port = 7000 -log_file = ./frpc.log +log_file = ./xfrpc.log log_level = info auth_token = 123 -# 自定义域名在 frps.ini 中配置,方便做统一管理 +# 自定义域名在 xfrps.ini 中配置,方便做统一管理 [web01] type = http 
local_ip = 127.0.0.1 diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..6b0085b --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module github.com/liudf0716/xfrps + +go 1.13 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 + github.com/fatedier/beego v1.7.2 + github.com/golang/snappy v0.0.4 + github.com/julienschmidt/httprouter v1.3.0 + github.com/pkg/errors v0.9.1 // indirect + github.com/rakyll/statik v0.1.7 + github.com/stretchr/testify v1.7.1 // indirect + github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec + github.com/xtaci/smux v1.5.16 + golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..1f20536 --- /dev/null +++ b/go.sum @@ -0,0 +1,36 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/fatedier/beego v1.7.2 h1:kVw3oKiXccInqG+Z/7l8zyRQXrsCQEfcUxgzfGK+R8g= +github.com/fatedier/beego v1.7.2/go.mod h1:wx3gB6dbIfBRcucp94PI9Bt3I0F2c/MyNEWuhzpWiwk= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks= +github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= +github.com/xtaci/smux v1.5.16 h1:FBPYOkW8ZTjLKUM4LI4xnnuuDC8CQ/dB04HD519WoEk= +github.com/xtaci/smux v1.5.16/go.mod h1:OMlQbT5vcgl2gb49mFkYo6SMf+zP3rcjcwQz7ZU7IGY= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29 h1:tkVvjkPTB7pnW3jnid7kNyAMPVWllTNOf/qKDze4p9o= +golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/models/config/proxy.go b/models/config/proxy.go index f2ae4d2..de8b180 100644 --- a/models/config/proxy.go +++ b/models/config/proxy.go @@ -20,10 +20,10 @@ import ( "strconv" "strings" - "github.com/KunTengRom/xfrps/models/consts" - "github.com/KunTengRom/xfrps/models/msg" + "github.com/liudf0716/xfrps/models/consts" + "github.com/liudf0716/xfrps/models/msg" - "github.com/KunTengRom/xfrps/utils/util" + "github.com/liudf0716/xfrps/utils/util" ini "github.com/vaughan0/go-ini" ) diff --git a/models/config/server_common.go b/models/config/server_common.go index 5dab462..db0a9ea 100644 --- a/models/config/server_common.go +++ b/models/config/server_common.go @@ -19,7 +19,7 @@ import ( "strconv" "strings" - "github.com/KunTengRom/xfrps/utils/util" + "github.com/liudf0716/xfrps/utils/util" ini "github.com/vaughan0/go-ini" ) diff --git a/models/msg/pack.go b/models/msg/pack.go index af305de..af68d34 100644 --- a/models/msg/pack.go +++ b/models/msg/pack.go @@ -21,7 +21,7 @@ import ( "fmt" "reflect" - "github.com/KunTengRom/xfrps/utils/errors" + "github.com/liudf0716/xfrps/utils/errors" ) func unpack(typeByte byte, buffer []byte, msgIn Message) (msg Message, err error) { diff --git a/models/msg/pack_test.go b/models/msg/pack_test.go index 988fc67..99d6b94 100644 --- a/models/msg/pack_test.go +++ b/models/msg/pack_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/KunTengRom/xfrps/utils/errors" + "github.com/liudf0716/xfrps/utils/errors" ) type TestStruct struct{} diff --git a/models/plugin/http_proxy.go b/models/plugin/http_proxy.go index 6fdbad0..7f6fca8 100644 --- a/models/plugin/http_proxy.go +++ b/models/plugin/http_proxy.go @@ -23,9 +23,9 @@ import ( "strings" "sync" - "github.com/KunTengRom/xfrps/models/proto/tcp" - "github.com/KunTengRom/xfrps/utils/errors" - frpNet "github.com/KunTengRom/xfrps/utils/net" + "github.com/liudf0716/xfrps/models/proto/tcp" + "github.com/liudf0716/xfrps/utils/errors" + frpNet "github.com/liudf0716/xfrps/utils/net" ) const PluginHttpProxy = "http_proxy" diff --git a/models/plugin/unix_domain_socket.go b/models/plugin/unix_domain_socket.go index e46c22d..bb2d2ba 100644 --- a/models/plugin/unix_domain_socket.go +++ b/models/plugin/unix_domain_socket.go @@ -19,7 +19,7 @@ import ( "io" "net" - "github.com/KunTengRom/xfrps/models/proto/tcp" + "github.com/liudf0716/xfrps/models/proto/tcp" ) const PluginUnixDomainSocket = "unix_domain_socket" diff --git a/models/proto/tcp/tcp.go b/models/proto/tcp/tcp.go index 75a555e..5630907 100644 --- a/models/proto/tcp/tcp.go +++ b/models/proto/tcp/tcp.go @@ -19,7 +19,7 @@ import ( "github.com/golang/snappy" - "github.com/KunTengRom/xfrps/utils/crypto" + "github.com/liudf0716/xfrps/utils/crypto" ) func WithEncryption(rwc io.ReadWriteCloser, key []byte) (io.ReadWriteCloser, 
error) { diff --git a/models/proto/udp/udp.go b/models/proto/udp/udp.go index 51ee24d..94af802 100644 --- a/models/proto/udp/udp.go +++ b/models/proto/udp/udp.go @@ -20,9 +20,9 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/models/msg" - "github.com/KunTengRom/xfrps/utils/errors" - "github.com/KunTengRom/xfrps/utils/pool" + "github.com/liudf0716/xfrps/models/msg" + "github.com/liudf0716/xfrps/utils/errors" + "github.com/liudf0716/xfrps/utils/pool" ) func NewUdpPacket(buf []byte, laddr, raddr *net.UDPAddr) *msg.UdpPacket { diff --git a/server/control.go b/server/control.go index 4832310..bf8fd93 100644 --- a/server/control.go +++ b/server/control.go @@ -20,15 +20,15 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/models/consts" - "github.com/KunTengRom/xfrps/models/msg" - "github.com/KunTengRom/xfrps/utils/crypto" - "github.com/KunTengRom/xfrps/utils/errors" - "github.com/KunTengRom/xfrps/utils/net" - "github.com/KunTengRom/xfrps/utils/shutdown" - "github.com/KunTengRom/xfrps/utils/util" - "github.com/KunTengRom/xfrps/utils/version" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/models/consts" + "github.com/liudf0716/xfrps/models/msg" + "github.com/liudf0716/xfrps/utils/crypto" + "github.com/liudf0716/xfrps/utils/errors" + "github.com/liudf0716/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/shutdown" + "github.com/liudf0716/xfrps/utils/util" + "github.com/liudf0716/xfrps/utils/version" ) type Control struct { diff --git a/server/dashboard.go b/server/dashboard.go index a87a251..6cc92c1 100644 --- a/server/dashboard.go +++ b/server/dashboard.go @@ -23,8 +23,8 @@ import ( "strings" "time" - "github.com/KunTengRom/xfrps/assets" - "github.com/KunTengRom/xfrps/models/config" + "github.com/liudf0716/xfrps/assets" + "github.com/liudf0716/xfrps/models/config" "github.com/julienschmidt/httprouter" ) diff --git a/server/dashboard_api.go b/server/dashboard_api.go index 9db2982..01cdd87 100644 --- a/server/dashboard_api.go +++ b/server/dashboard_api.go @@ -19,11 +19,11 @@ import ( "net/http" "strconv" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/models/consts" - "github.com/KunTengRom/xfrps/utils/log" - "github.com/KunTengRom/xfrps/utils/util" - "github.com/KunTengRom/xfrps/utils/version" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/models/consts" + "github.com/liudf0716/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/util" + "github.com/liudf0716/xfrps/utils/version" "github.com/julienschmidt/httprouter" ) diff --git a/server/metric.go b/server/metric.go index 28e4d1f..8de0abc 100644 --- a/server/metric.go +++ b/server/metric.go @@ -18,9 +18,9 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/utils/log" - "github.com/KunTengRom/xfrps/utils/metric" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/metric" ) const ( diff --git a/server/proxy.go b/server/proxy.go index 011c424..055ad48 100644 --- a/server/proxy.go +++ b/server/proxy.go @@ -22,14 +22,14 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/models/msg" - "github.com/KunTengRom/xfrps/models/proto/tcp" - "github.com/KunTengRom/xfrps/models/proto/udp" - "github.com/KunTengRom/xfrps/utils/errors" - "github.com/KunTengRom/xfrps/utils/log" - frpNet "github.com/KunTengRom/xfrps/utils/net" - 
"github.com/KunTengRom/xfrps/utils/vhost" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/models/msg" + "github.com/liudf0716/xfrps/models/proto/tcp" + "github.com/liudf0716/xfrps/models/proto/udp" + "github.com/liudf0716/xfrps/utils/errors" + "github.com/liudf0716/xfrps/utils/log" + frpNet "github.com/liudf0716/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/vhost" ) type Proxy interface { diff --git a/server/service.go b/server/service.go index ecb7781..f419f77 100644 --- a/server/service.go +++ b/server/service.go @@ -18,14 +18,14 @@ import ( "fmt" "time" - "github.com/KunTengRom/xfrps/assets" - "github.com/KunTengRom/xfrps/models/config" - "github.com/KunTengRom/xfrps/models/msg" - "github.com/KunTengRom/xfrps/utils/log" - frpNet "github.com/KunTengRom/xfrps/utils/net" - "github.com/KunTengRom/xfrps/utils/util" - "github.com/KunTengRom/xfrps/utils/version" - "github.com/KunTengRom/xfrps/utils/vhost" + "github.com/liudf0716/xfrps/assets" + "github.com/liudf0716/xfrps/models/config" + "github.com/liudf0716/xfrps/models/msg" + "github.com/liudf0716/xfrps/utils/log" + frpNet "github.com/liudf0716/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/util" + "github.com/liudf0716/xfrps/utils/version" + "github.com/liudf0716/xfrps/utils/vhost" "github.com/xtaci/smux" ) diff --git a/tests/echo_server.go b/tests/echo_server.go index 1fe53a6..22a599d 100644 --- a/tests/echo_server.go +++ b/tests/echo_server.go @@ -8,7 +8,7 @@ import ( "os" "syscall" - frpNet "github.com/KunTengRom/xfrps/utils/net" + frpNet "github.com/liudf0716/xfrps/utils/net" ) func StartEchoServer() { diff --git a/tests/func_test.go b/tests/func_test.go index 514b66b..0cec2ef 100644 --- a/tests/func_test.go +++ b/tests/func_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - frpNet "github.com/KunTengRom/xfrps/utils/net" + frpNet "github.com/liudf0716/xfrps/utils/net" ) var ( diff --git a/utils/net/conn.go b/utils/net/conn.go index 3d2d5c5..b7c1e84 100644 --- a/utils/net/conn.go +++ b/utils/net/conn.go @@ -19,7 +19,7 @@ import ( "net" "time" - "github.com/KunTengRom/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/log" ) // Conn is the interface of connections used in frp. 
diff --git a/utils/net/tcp.go b/utils/net/tcp.go index 39503a6..5d08cbd 100644 --- a/utils/net/tcp.go +++ b/utils/net/tcp.go @@ -22,7 +22,7 @@ import ( "net/http" "net/url" - "github.com/KunTengRom/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/log" ) type TcpListener struct { diff --git a/utils/net/udp.go b/utils/net/udp.go index 074bc6d..70f0a11 100644 --- a/utils/net/udp.go +++ b/utils/net/udp.go @@ -21,8 +21,8 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/utils/log" - "github.com/KunTengRom/xfrps/utils/pool" + "github.com/liudf0716/xfrps/utils/log" + "github.com/liudf0716/xfrps/utils/pool" ) type UdpPacket struct { diff --git a/utils/vhost/http.go b/utils/vhost/http.go index c6cfcdb..4536cb3 100644 --- a/utils/vhost/http.go +++ b/utils/vhost/http.go @@ -25,8 +25,8 @@ import ( "strings" "time" - frpNet "github.com/KunTengRom/xfrps/utils/net" - "github.com/KunTengRom/xfrps/utils/pool" + frpNet "github.com/liudf0716/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/pool" ) type HttpMuxer struct { diff --git a/utils/vhost/https.go b/utils/vhost/https.go index f5a086b..9e677bf 100644 --- a/utils/vhost/https.go +++ b/utils/vhost/https.go @@ -20,8 +20,8 @@ import ( "strings" "time" - frpNet "github.com/KunTengRom/xfrps/utils/net" - "github.com/KunTengRom/xfrps/utils/pool" + frpNet "github.com/liudf0716/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/pool" ) const ( diff --git a/utils/vhost/vhost.go b/utils/vhost/vhost.go index 0cb39f8..4950899 100644 --- a/utils/vhost/vhost.go +++ b/utils/vhost/vhost.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/KunTengRom/xfrps/utils/log" - frpNet "github.com/KunTengRom/xfrps/utils/net" + "github.com/liudf0716/xfrps/utils/log" + frpNet "github.com/liudf0716/xfrps/utils/net" ) type muxFunc func(frpNet.Conn) (frpNet.Conn, map[string]string, error) diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index c836416..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 8a4a658..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 1fe3cf3..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. 
-// +build js appengine safe disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 7c519ff..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. 
-// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. 
- base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. 
- l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. 
- DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. 
-func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) 
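For context on the deleted config.go/doc.go above, here is a minimal, hedged sketch of how the documented ConfigState options combine with its Dump/Sdump methods. The fields (Indent, SortKeys, DisablePointerAddresses) and methods are taken from the file being removed; the sample struct and values are invented purely for illustration.

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Hypothetical value used only for illustration.
	type server struct {
		Name  string
		Ports map[string]int
	}
	v := server{Name: "xfrps", Ports: map[string]int{"bind": 7000, "dashboard": 7500}}

	// A ConfigState mirrors the top-level spew functions but carries its own
	// options. SortKeys and DisablePointerAddresses give deterministic,
	// diffable output, which is what the option docs above suggest for tests.
	cfg := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true,
		DisablePointerAddresses: true,
	}

	cfg.Dump(v)       // dump to stdout with the custom options
	s := cfg.Sdump(v) // or capture the same output as a string
	_ = s
}
```

The point of the separate ConfigState (rather than mutating the global spew.Config) is that each instance keeps its own settings, so test helpers and debug logging can use different formatting without interfering with each other.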
- -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. 
- - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index df1d582..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. 
- if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. 
- if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. 
See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index c49875b..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. -func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. 
- if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/docopt/docopt-go/.gitignore b/vendor/github.com/docopt/docopt-go/.gitignore deleted file mode 100644 index 49ad16c..0000000 --- a/vendor/github.com/docopt/docopt-go/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# coverage droppings -profile.cov diff --git a/vendor/github.com/docopt/docopt-go/.travis.yml b/vendor/github.com/docopt/docopt-go/.travis.yml deleted file mode 100644 index 65fad59..0000000 --- a/vendor/github.com/docopt/docopt-go/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -# Travis CI (http://travis-ci.org/) is a continuous integration -# service for open source projects. This file configures it -# to run unit tests for docopt-go. - -language: go - -go: - - 1.4 - - 1.5 - - tip - -matrix: - fast_finish: true - -before_install: - - go get golang.org/x/tools/cmd/vet - - go get golang.org/x/tools/cmd/cover - - go get github.com/golang/lint/golint - - go get github.com/mattn/goveralls - -install: - - go get -d -v ./... && go build -v ./... - -script: - - go vet -x ./... - - $HOME/gopath/bin/golint ./... - - go test -v ./... - - go test -covermode=count -coverprofile=profile.cov . - -after_script: - - $HOME/gopath/bin/goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/docopt/docopt-go/LICENSE b/vendor/github.com/docopt/docopt-go/LICENSE deleted file mode 100644 index 8841af1..0000000 --- a/vendor/github.com/docopt/docopt-go/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Keith Batten - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docopt/docopt-go/README.md b/vendor/github.com/docopt/docopt-go/README.md deleted file mode 100644 index 71c92aa..0000000 --- a/vendor/github.com/docopt/docopt-go/README.md +++ /dev/null @@ -1,88 +0,0 @@ -docopt-go -========= - -[![Build Status](https://travis-ci.org/docopt/docopt.go.svg?branch=master)](https://travis-ci.org/docopt/docopt.go) -[![Coverage Status](https://coveralls.io/repos/docopt/docopt.go/badge.png)](https://coveralls.io/r/docopt/docopt.go) -[![GoDoc](https://godoc.org/github.com/docopt/docopt.go?status.png)](https://godoc.org/github.com/docopt/docopt.go) - -An implementation of [docopt](http://docopt.org/) in the -[Go](http://golang.org/) programming language. - -**docopt** helps you create *beautiful* command-line interfaces easily: - -```go -package main - -import ( - "fmt" - "github.com/docopt/docopt-go" -) - -func main() { - usage := `Naval Fate. - -Usage: - naval_fate ship new ... - naval_fate ship move [--speed=] - naval_fate ship shoot - naval_fate mine (set|remove) [--moored|--drifting] - naval_fate -h | --help - naval_fate --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Moored (anchored) mine. - --drifting Drifting mine.` - - arguments, _ := docopt.Parse(usage, nil, true, "Naval Fate 2.0", false) - fmt.Println(arguments) -} -``` - -**docopt** parses command-line arguments based on a help message. Don't -write parser code: a good help message already has all the necessary -information in it. - -## Installation - -⚠ Use the alias “docopt-go”. To use docopt in your Go code: - -```go -import "github.com/docopt/docopt-go" -``` - -To install docopt according to your `$GOPATH`: - -```console -$ go get github.com/docopt/docopt-go -``` - -## API - -```go -func Parse(doc string, argv []string, help bool, version string, - optionsFirst bool, exit ...bool) (map[string]interface{}, error) -``` -Parse `argv` based on the command-line interface described in `doc`. - -Given a conventional command-line help message, docopt creates a parser and -processes the arguments. See -https://github.com/docopt/docopt#help-message-format for a description of the -help message format. If `argv` is `nil`, `os.Args[1:]` is used. - -docopt returns a map of option names to the values parsed from `argv`, and an -error or `nil`. - -More documentation for docopt is available at -[GoDoc.org](https://godoc.org/github.com/docopt/docopt.go). - -## Testing - -All tests from the Python version are implemented and passing -at [Travis CI](https://travis-ci.org/docopt/docopt.go). New -language-agnostic tests have been added -to [test_golang.docopt](test_golang.docopt). - -To run tests for docopt-go, use `go test`. 
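The deleted README stops at printing the raw map returned by Parse, so the following is a brief, hedged sketch of consuming the parsed values. The Parse signature and the key conventions ("command", "<argument>", "--option") follow the README and docopt.go being removed; the trimmed usage string and the concrete type assertions (bool for commands, string for option arguments) are assumptions made for illustration only.

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/docopt/docopt-go"
)

func main() {
	usage := `Naval Fate.

Usage:
  naval_fate ship <name> move <x> <y> [--speed=<kn>]
  naval_fate -h | --help

Options:
  -h --help      Show this screen.
  --speed=<kn>   Speed in knots [default: 10].`

	// Parse(doc, argv, help, version, optionsFirst) as described in the README;
	// passing nil for argv makes docopt read os.Args[1:].
	args, err := docopt.Parse(usage, nil, true, "Naval Fate 2.0", false)
	if err != nil {
		return
	}

	// The returned map holds interface{} values, so callers type-assert:
	// commands come back as bool, positionals and option arguments as string
	// (an assumption based on the usage string above).
	if moved, _ := args["move"].(bool); moved {
		name, _ := args["<name>"].(string)
		speedStr, _ := args["--speed"].(string)
		speed, _ := strconv.Atoi(speedStr)
		fmt.Printf("moving %s at %d knots\n", name, speed)
	}
}
```

With exit left at its default, a bad command line prints the usage text and exits with status 1, so the error branch above is rarely reached in practice.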
diff --git a/vendor/github.com/docopt/docopt-go/docopt.go b/vendor/github.com/docopt/docopt-go/docopt.go deleted file mode 100644 index d929fc3..0000000 --- a/vendor/github.com/docopt/docopt-go/docopt.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Licensed under terms of MIT license (see LICENSE-MIT) -// Copyright (c) 2013 Keith Batten, kbatten@gmail.com - -/* -Package docopt parses command-line arguments based on a help message. - -⚠ Use the alias “docopt-go”: - import "github.com/docopt/docopt-go" -or - $ go get github.com/docopt/docopt-go -*/ -package docopt - -import ( - "fmt" - "os" - "reflect" - "regexp" - "strings" - "unicode" -) - -/* -Parse `argv` based on the command-line interface described in `doc`. - -Given a conventional command-line help message, docopt creates a parser and -processes the arguments. See -https://github.com/docopt/docopt#help-message-format for a description of the -help message format. If `argv` is `nil`, `os.Args[1:]` is used. - -docopt returns a map of option names to the values parsed from `argv`, and an -error or `nil`. - -Set `help` to `false` to disable automatic help messages on `-h` or `--help`. -If `version` is a non-empty string, it will be printed when `--version` is -specified. Set `optionsFirst` to `true` to require that options always come -before positional arguments; otherwise they can overlap. - -By default, docopt calls `os.Exit(0)` if it handled a built-in option such as -`-h` or `--version`. If the user errored with a wrong command or options, -docopt exits with a return code of 1. To stop docopt from calling `os.Exit()` -and to handle your own return codes, pass an optional last parameter of `false` -for `exit`. -*/ -func Parse(doc string, argv []string, help bool, version string, - optionsFirst bool, exit ...bool) (map[string]interface{}, error) { - // if "false" was the (optional) last arg, don't call os.Exit() - exitOk := true - if len(exit) > 0 { - exitOk = exit[0] - } - args, output, err := parse(doc, argv, help, version, optionsFirst) - if _, ok := err.(*UserError); ok { - // the user gave us bad input - fmt.Fprintln(os.Stderr, output) - if exitOk { - os.Exit(1) - } - } else if len(output) > 0 && err == nil { - // the user asked for help or `--version` - fmt.Println(output) - if exitOk { - os.Exit(0) - } - } - return args, err -} - -// parse and return a map of args, output and all errors -func parse(doc string, argv []string, help bool, version string, optionsFirst bool) (args map[string]interface{}, output string, err error) { - if argv == nil && len(os.Args) > 1 { - argv = os.Args[1:] - } - - usageSections := parseSection("usage:", doc) - - if len(usageSections) == 0 { - err = newLanguageError("\"usage:\" (case-insensitive) not found.") - return - } - if len(usageSections) > 1 { - err = newLanguageError("More than one \"usage:\" (case-insensitive).") - return - } - usage := usageSections[0] - - options := parseDefaults(doc) - formal, err := formalUsage(usage) - if err != nil { - output = handleError(err, usage) - return - } - - pat, err := parsePattern(formal, &options) - if err != nil { - output = handleError(err, usage) - return - } - - patternArgv, err := parseArgv(newTokenList(argv, errorUser), &options, optionsFirst) - if err != nil { - output = handleError(err, usage) - return - } - patFlat, err := pat.flat(patternOption) - if err != nil { - output = handleError(err, usage) - return - } - patternOptions := patFlat.unique() - - patFlat, err = pat.flat(patternOptionSSHORTCUT) - if err != nil { - output = handleError(err, usage) - 
return - } - for _, optionsShortcut := range patFlat { - docOptions := parseDefaults(doc) - optionsShortcut.children = docOptions.unique().diff(patternOptions) - } - - if output = extras(help, version, patternArgv, doc); len(output) > 0 { - return - } - - err = pat.fix() - if err != nil { - output = handleError(err, usage) - return - } - matched, left, collected := pat.match(&patternArgv, nil) - if matched && len(*left) == 0 { - patFlat, err = pat.flat(patternDefault) - if err != nil { - output = handleError(err, usage) - return - } - args = append(patFlat, *collected...).dictionary() - return - } - - err = newUserError("") - output = handleError(err, usage) - return -} - -func handleError(err error, usage string) string { - if _, ok := err.(*UserError); ok { - return strings.TrimSpace(fmt.Sprintf("%s\n%s", err, usage)) - } - return "" -} - -func parseSection(name, source string) []string { - p := regexp.MustCompile(`(?im)^([^\n]*` + name + `[^\n]*\n?(?:[ \t].*?(?:\n|$))*)`) - s := p.FindAllString(source, -1) - if s == nil { - s = []string{} - } - for i, v := range s { - s[i] = strings.TrimSpace(v) - } - return s -} - -func parseDefaults(doc string) patternList { - defaults := patternList{} - p := regexp.MustCompile(`\n[ \t]*(-\S+?)`) - for _, s := range parseSection("options:", doc) { - // FIXME corner case "bla: options: --foo" - _, _, s = stringPartition(s, ":") // get rid of "options:" - split := p.Split("\n"+s, -1)[1:] - match := p.FindAllStringSubmatch("\n"+s, -1) - for i := range split { - optionDescription := match[i][1] + split[i] - if strings.HasPrefix(optionDescription, "-") { - defaults = append(defaults, parseOption(optionDescription)) - } - } - } - return defaults -} - -func parsePattern(source string, options *patternList) (*pattern, error) { - tokens := tokenListFromPattern(source) - result, err := parseExpr(tokens, options) - if err != nil { - return nil, err - } - if tokens.current() != nil { - return nil, tokens.errorFunc("unexpected ending: %s" + strings.Join(tokens.tokens, " ")) - } - return newRequired(result...), nil -} - -func parseArgv(tokens *tokenList, options *patternList, optionsFirst bool) (patternList, error) { - /* - Parse command-line argument vector. - - If options_first: - argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ; - else: - argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ; - */ - parsed := patternList{} - for tokens.current() != nil { - if tokens.current().eq("--") { - for _, v := range tokens.tokens { - parsed = append(parsed, newArgument("", v)) - } - return parsed, nil - } else if tokens.current().hasPrefix("--") { - pl, err := parseLong(tokens, options) - if err != nil { - return nil, err - } - parsed = append(parsed, pl...) - } else if tokens.current().hasPrefix("-") && !tokens.current().eq("-") { - ps, err := parseShorts(tokens, options) - if err != nil { - return nil, err - } - parsed = append(parsed, ps...) 
- } else if optionsFirst { - for _, v := range tokens.tokens { - parsed = append(parsed, newArgument("", v)) - } - return parsed, nil - } else { - parsed = append(parsed, newArgument("", tokens.move().String())) - } - } - return parsed, nil -} - -func parseOption(optionDescription string) *pattern { - optionDescription = strings.TrimSpace(optionDescription) - options, _, description := stringPartition(optionDescription, " ") - options = strings.Replace(options, ",", " ", -1) - options = strings.Replace(options, "=", " ", -1) - - short := "" - long := "" - argcount := 0 - var value interface{} - value = false - - reDefault := regexp.MustCompile(`(?i)\[default: (.*)\]`) - for _, s := range strings.Fields(options) { - if strings.HasPrefix(s, "--") { - long = s - } else if strings.HasPrefix(s, "-") { - short = s - } else { - argcount = 1 - } - if argcount > 0 { - matched := reDefault.FindAllStringSubmatch(description, -1) - if len(matched) > 0 { - value = matched[0][1] - } else { - value = nil - } - } - } - return newOption(short, long, argcount, value) -} - -func parseExpr(tokens *tokenList, options *patternList) (patternList, error) { - // expr ::= seq ( '|' seq )* ; - seq, err := parseSeq(tokens, options) - if err != nil { - return nil, err - } - if !tokens.current().eq("|") { - return seq, nil - } - var result patternList - if len(seq) > 1 { - result = patternList{newRequired(seq...)} - } else { - result = seq - } - for tokens.current().eq("|") { - tokens.move() - seq, err = parseSeq(tokens, options) - if err != nil { - return nil, err - } - if len(seq) > 1 { - result = append(result, newRequired(seq...)) - } else { - result = append(result, seq...) - } - } - if len(result) > 1 { - return patternList{newEither(result...)}, nil - } - return result, nil -} - -func parseSeq(tokens *tokenList, options *patternList) (patternList, error) { - // seq ::= ( atom [ '...' ] )* ; - result := patternList{} - for !tokens.current().match(true, "]", ")", "|") { - atom, err := parseAtom(tokens, options) - if err != nil { - return nil, err - } - if tokens.current().eq("...") { - atom = patternList{newOneOrMore(atom...)} - tokens.move() - } - result = append(result, atom...) 
- } - return result, nil -} - -func parseAtom(tokens *tokenList, options *patternList) (patternList, error) { - // atom ::= '(' expr ')' | '[' expr ']' | 'options' | long | shorts | argument | command ; - tok := tokens.current() - result := patternList{} - if tokens.current().match(false, "(", "[") { - tokens.move() - var matching string - pl, err := parseExpr(tokens, options) - if err != nil { - return nil, err - } - if tok.eq("(") { - matching = ")" - result = patternList{newRequired(pl...)} - } else if tok.eq("[") { - matching = "]" - result = patternList{newOptional(pl...)} - } - moved := tokens.move() - if !moved.eq(matching) { - return nil, tokens.errorFunc("unmatched '%s', expected: '%s' got: '%s'", tok, matching, moved) - } - return result, nil - } else if tok.eq("options") { - tokens.move() - return patternList{newOptionsShortcut()}, nil - } else if tok.hasPrefix("--") && !tok.eq("--") { - return parseLong(tokens, options) - } else if tok.hasPrefix("-") && !tok.eq("-") && !tok.eq("--") { - return parseShorts(tokens, options) - } else if tok.hasPrefix("<") && tok.hasSuffix(">") || tok.isUpper() { - return patternList{newArgument(tokens.move().String(), nil)}, nil - } - return patternList{newCommand(tokens.move().String(), false)}, nil -} - -func parseLong(tokens *tokenList, options *patternList) (patternList, error) { - // long ::= '--' chars [ ( ' ' | '=' ) chars ] ; - long, eq, v := stringPartition(tokens.move().String(), "=") - var value interface{} - var opt *pattern - if eq == "" && v == "" { - value = nil - } else { - value = v - } - - if !strings.HasPrefix(long, "--") { - return nil, newError("long option '%s' doesn't start with --", long) - } - similar := patternList{} - for _, o := range *options { - if o.long == long { - similar = append(similar, o) - } - } - if tokens.err == errorUser && len(similar) == 0 { // if no exact match - similar = patternList{} - for _, o := range *options { - if strings.HasPrefix(o.long, long) { - similar = append(similar, o) - } - } - } - if len(similar) > 1 { // might be simply specified ambiguously 2+ times? 
- similarLong := make([]string, len(similar)) - for i, s := range similar { - similarLong[i] = s.long - } - return nil, tokens.errorFunc("%s is not a unique prefix: %s?", long, strings.Join(similarLong, ", ")) - } else if len(similar) < 1 { - argcount := 0 - if eq == "=" { - argcount = 1 - } - opt = newOption("", long, argcount, false) - *options = append(*options, opt) - if tokens.err == errorUser { - var val interface{} - if argcount > 0 { - val = value - } else { - val = true - } - opt = newOption("", long, argcount, val) - } - } else { - opt = newOption(similar[0].short, similar[0].long, similar[0].argcount, similar[0].value) - if opt.argcount == 0 { - if value != nil { - return nil, tokens.errorFunc("%s must not have an argument", opt.long) - } - } else { - if value == nil { - if tokens.current().match(true, "--") { - return nil, tokens.errorFunc("%s requires argument", opt.long) - } - moved := tokens.move() - if moved != nil { - value = moved.String() // only set as string if not nil - } - } - } - if tokens.err == errorUser { - if value != nil { - opt.value = value - } else { - opt.value = true - } - } - } - - return patternList{opt}, nil -} - -func parseShorts(tokens *tokenList, options *patternList) (patternList, error) { - // shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ; - tok := tokens.move() - if !tok.hasPrefix("-") || tok.hasPrefix("--") { - return nil, newError("short option '%s' doesn't start with -", tok) - } - left := strings.TrimLeft(tok.String(), "-") - parsed := patternList{} - for left != "" { - var opt *pattern - short := "-" + left[0:1] - left = left[1:] - similar := patternList{} - for _, o := range *options { - if o.short == short { - similar = append(similar, o) - } - } - if len(similar) > 1 { - return nil, tokens.errorFunc("%s is specified ambiguously %d times", short, len(similar)) - } else if len(similar) < 1 { - opt = newOption(short, "", 0, false) - *options = append(*options, opt) - if tokens.err == errorUser { - opt = newOption(short, "", 0, true) - } - } else { // why copying is necessary here? 
- opt = newOption(short, similar[0].long, similar[0].argcount, similar[0].value) - var value interface{} - if opt.argcount > 0 { - if left == "" { - if tokens.current().match(true, "--") { - return nil, tokens.errorFunc("%s requires argument", short) - } - value = tokens.move().String() - } else { - value = left - left = "" - } - } - if tokens.err == errorUser { - if value != nil { - opt.value = value - } else { - opt.value = true - } - } - } - parsed = append(parsed, opt) - } - return parsed, nil -} - -func newTokenList(source []string, err errorType) *tokenList { - errorFunc := newError - if err == errorUser { - errorFunc = newUserError - } else if err == errorLanguage { - errorFunc = newLanguageError - } - return &tokenList{source, errorFunc, err} -} - -func tokenListFromString(source string) *tokenList { - return newTokenList(strings.Fields(source), errorUser) -} - -func tokenListFromPattern(source string) *tokenList { - p := regexp.MustCompile(`([\[\]\(\)\|]|\.\.\.)`) - source = p.ReplaceAllString(source, ` $1 `) - p = regexp.MustCompile(`\s+|(\S*<.*?>)`) - split := p.Split(source, -1) - match := p.FindAllStringSubmatch(source, -1) - var result []string - l := len(split) - for i := 0; i < l; i++ { - if len(split[i]) > 0 { - result = append(result, split[i]) - } - if i < l-1 && len(match[i][1]) > 0 { - result = append(result, match[i][1]) - } - } - return newTokenList(result, errorLanguage) -} - -func formalUsage(section string) (string, error) { - _, _, section = stringPartition(section, ":") // drop "usage:" - pu := strings.Fields(section) - - if len(pu) == 0 { - return "", newLanguageError("no fields found in usage (perhaps a spacing error).") - } - - result := "( " - for _, s := range pu[1:] { - if s == pu[0] { - result += ") | ( " - } else { - result += s + " " - } - } - result += ")" - - return result, nil -} - -func extras(help bool, version string, options patternList, doc string) string { - if help { - for _, o := range options { - if (o.name == "-h" || o.name == "--help") && o.value == true { - return strings.Trim(doc, "\n") - } - } - } - if version != "" { - for _, o := range options { - if (o.name == "--version") && o.value == true { - return version - } - } - } - return "" -} - -type errorType int - -const ( - errorUser errorType = iota - errorLanguage -) - -func (e errorType) String() string { - switch e { - case errorUser: - return "errorUser" - case errorLanguage: - return "errorLanguage" - } - return "" -} - -// UserError records an error with program arguments. -type UserError struct { - msg string - Usage string -} - -func (e UserError) Error() string { - return e.msg -} -func newUserError(msg string, f ...interface{}) error { - return &UserError{fmt.Sprintf(msg, f...), ""} -} - -// LanguageError records an error with the doc string. 
-type LanguageError struct { - msg string -} - -func (e LanguageError) Error() string { - return e.msg -} -func newLanguageError(msg string, f ...interface{}) error { - return &LanguageError{fmt.Sprintf(msg, f...)} -} - -var newError = fmt.Errorf - -type tokenList struct { - tokens []string - errorFunc func(string, ...interface{}) error - err errorType -} -type token string - -func (t *token) eq(s string) bool { - if t == nil { - return false - } - return string(*t) == s -} -func (t *token) match(matchNil bool, tokenStrings ...string) bool { - if t == nil && matchNil { - return true - } else if t == nil && !matchNil { - return false - } - - for _, tok := range tokenStrings { - if tok == string(*t) { - return true - } - } - return false -} -func (t *token) hasPrefix(prefix string) bool { - if t == nil { - return false - } - return strings.HasPrefix(string(*t), prefix) -} -func (t *token) hasSuffix(suffix string) bool { - if t == nil { - return false - } - return strings.HasSuffix(string(*t), suffix) -} -func (t *token) isUpper() bool { - if t == nil { - return false - } - return isStringUppercase(string(*t)) -} -func (t *token) String() string { - if t == nil { - return "" - } - return string(*t) -} - -func (tl *tokenList) current() *token { - if len(tl.tokens) > 0 { - return (*token)(&(tl.tokens[0])) - } - return nil -} - -func (tl *tokenList) length() int { - return len(tl.tokens) -} - -func (tl *tokenList) move() *token { - if len(tl.tokens) > 0 { - t := tl.tokens[0] - tl.tokens = tl.tokens[1:] - return (*token)(&t) - } - return nil -} - -type patternType uint - -const ( - // leaf - patternArgument patternType = 1 << iota - patternCommand - patternOption - - // branch - patternRequired - patternOptionAL - patternOptionSSHORTCUT // Marker/placeholder for [options] shortcut. - patternOneOrMore - patternEither - - patternLeaf = patternArgument + - patternCommand + - patternOption - patternBranch = patternRequired + - patternOptionAL + - patternOptionSSHORTCUT + - patternOneOrMore + - patternEither - patternAll = patternLeaf + patternBranch - patternDefault = 0 -) - -func (pt patternType) String() string { - switch pt { - case patternArgument: - return "argument" - case patternCommand: - return "command" - case patternOption: - return "option" - case patternRequired: - return "required" - case patternOptionAL: - return "optional" - case patternOptionSSHORTCUT: - return "optionsshortcut" - case patternOneOrMore: - return "oneormore" - case patternEither: - return "either" - case patternLeaf: - return "leaf" - case patternBranch: - return "branch" - case patternAll: - return "all" - case patternDefault: - return "default" - } - return "" -} - -type pattern struct { - t patternType - - children patternList - - name string - value interface{} - - short string - long string - argcount int -} - -type patternList []*pattern - -func newBranchPattern(t patternType, pl ...*pattern) *pattern { - var p pattern - p.t = t - p.children = make(patternList, len(pl)) - copy(p.children, pl) - return &p -} - -func newRequired(pl ...*pattern) *pattern { - return newBranchPattern(patternRequired, pl...) -} - -func newEither(pl ...*pattern) *pattern { - return newBranchPattern(patternEither, pl...) -} - -func newOneOrMore(pl ...*pattern) *pattern { - return newBranchPattern(patternOneOrMore, pl...) -} - -func newOptional(pl ...*pattern) *pattern { - return newBranchPattern(patternOptionAL, pl...) 
-} - -func newOptionsShortcut() *pattern { - var p pattern - p.t = patternOptionSSHORTCUT - return &p -} - -func newLeafPattern(t patternType, name string, value interface{}) *pattern { - // default: value=nil - var p pattern - p.t = t - p.name = name - p.value = value - return &p -} - -func newArgument(name string, value interface{}) *pattern { - // default: value=nil - return newLeafPattern(patternArgument, name, value) -} - -func newCommand(name string, value interface{}) *pattern { - // default: value=false - var p pattern - p.t = patternCommand - p.name = name - p.value = value - return &p -} - -func newOption(short, long string, argcount int, value interface{}) *pattern { - // default: "", "", 0, false - var p pattern - p.t = patternOption - p.short = short - p.long = long - if long != "" { - p.name = long - } else { - p.name = short - } - p.argcount = argcount - if value == false && argcount > 0 { - p.value = nil - } else { - p.value = value - } - return &p -} - -func (p *pattern) flat(types patternType) (patternList, error) { - if p.t&patternLeaf != 0 { - if types == patternDefault { - types = patternAll - } - if p.t&types != 0 { - return patternList{p}, nil - } - return patternList{}, nil - } - - if p.t&patternBranch != 0 { - if p.t&types != 0 { - return patternList{p}, nil - } - result := patternList{} - for _, child := range p.children { - childFlat, err := child.flat(types) - if err != nil { - return nil, err - } - result = append(result, childFlat...) - } - return result, nil - } - return nil, newError("unknown pattern type: %d, %d", p.t, types) -} - -func (p *pattern) fix() error { - err := p.fixIdentities(nil) - if err != nil { - return err - } - p.fixRepeatingArguments() - return nil -} - -func (p *pattern) fixIdentities(uniq patternList) error { - // Make pattern-tree tips point to same object if they are equal. - if p.t&patternBranch == 0 { - return nil - } - if uniq == nil { - pFlat, err := p.flat(patternDefault) - if err != nil { - return err - } - uniq = pFlat.unique() - } - for i, child := range p.children { - if child.t&patternBranch == 0 { - ind, err := uniq.index(child) - if err != nil { - return err - } - p.children[i] = uniq[ind] - } else { - err := child.fixIdentities(uniq) - if err != nil { - return err - } - } - } - return nil -} - -func (p *pattern) fixRepeatingArguments() { - // Fix elements that should accumulate/increment values. 
- var either []patternList - - for _, child := range p.transform().children { - either = append(either, child.children) - } - for _, cas := range either { - casMultiple := patternList{} - for _, e := range cas { - if cas.count(e) > 1 { - casMultiple = append(casMultiple, e) - } - } - for _, e := range casMultiple { - if e.t == patternArgument || e.t == patternOption && e.argcount > 0 { - switch e.value.(type) { - case string: - e.value = strings.Fields(e.value.(string)) - case []string: - default: - e.value = []string{} - } - } - if e.t == patternCommand || e.t == patternOption && e.argcount == 0 { - e.value = 0 - } - } - } -} - -func (p *pattern) match(left *patternList, collected *patternList) (bool, *patternList, *patternList) { - if collected == nil { - collected = &patternList{} - } - if p.t&patternRequired != 0 { - l := left - c := collected - for _, p := range p.children { - var matched bool - matched, l, c = p.match(l, c) - if !matched { - return false, left, collected - } - } - return true, l, c - } else if p.t&patternOptionAL != 0 || p.t&patternOptionSSHORTCUT != 0 { - for _, p := range p.children { - _, left, collected = p.match(left, collected) - } - return true, left, collected - } else if p.t&patternOneOrMore != 0 { - if len(p.children) != 1 { - panic("OneOrMore.match(): assert len(p.children) == 1") - } - l := left - c := collected - var lAlt *patternList - matched := true - times := 0 - for matched { - // could it be that something didn't match but changed l or c? - matched, l, c = p.children[0].match(l, c) - if matched { - times++ - } - if lAlt == l { - break - } - lAlt = l - } - if times >= 1 { - return true, l, c - } - return false, left, collected - } else if p.t&patternEither != 0 { - type outcomeStruct struct { - matched bool - left *patternList - collected *patternList - length int - } - outcomes := []outcomeStruct{} - for _, p := range p.children { - matched, l, c := p.match(left, collected) - outcome := outcomeStruct{matched, l, c, len(*l)} - if matched { - outcomes = append(outcomes, outcome) - } - } - if len(outcomes) > 0 { - minLen := outcomes[0].length - minIndex := 0 - for i, v := range outcomes { - if v.length < minLen { - minIndex = i - } - } - return outcomes[minIndex].matched, outcomes[minIndex].left, outcomes[minIndex].collected - } - return false, left, collected - } else if p.t&patternLeaf != 0 { - pos, match := p.singleMatch(left) - var increment interface{} - if match == nil { - return false, left, collected - } - leftAlt := make(patternList, len((*left)[:pos]), len((*left)[:pos])+len((*left)[pos+1:])) - copy(leftAlt, (*left)[:pos]) - leftAlt = append(leftAlt, (*left)[pos+1:]...) - sameName := patternList{} - for _, a := range *collected { - if a.name == p.name { - sameName = append(sameName, a) - } - } - - switch p.value.(type) { - case int, []string: - switch p.value.(type) { - case int: - increment = 1 - case []string: - switch match.value.(type) { - case string: - increment = []string{match.value.(string)} - default: - increment = match.value - } - } - if len(sameName) == 0 { - match.value = increment - collectedMatch := make(patternList, len(*collected), len(*collected)+1) - copy(collectedMatch, *collected) - collectedMatch = append(collectedMatch, match) - return true, &leftAlt, &collectedMatch - } - switch sameName[0].value.(type) { - case int: - sameName[0].value = sameName[0].value.(int) + increment.(int) - case []string: - sameName[0].value = append(sameName[0].value.([]string), increment.([]string)...) 
- } - return true, &leftAlt, collected - } - collectedMatch := make(patternList, len(*collected), len(*collected)+1) - copy(collectedMatch, *collected) - collectedMatch = append(collectedMatch, match) - return true, &leftAlt, &collectedMatch - } - panic("unmatched type") -} - -func (p *pattern) singleMatch(left *patternList) (int, *pattern) { - if p.t&patternArgument != 0 { - for n, pat := range *left { - if pat.t&patternArgument != 0 { - return n, newArgument(p.name, pat.value) - } - } - return -1, nil - } else if p.t&patternCommand != 0 { - for n, pat := range *left { - if pat.t&patternArgument != 0 { - if pat.value == p.name { - return n, newCommand(p.name, true) - } - break - } - } - return -1, nil - } else if p.t&patternOption != 0 { - for n, pat := range *left { - if p.name == pat.name { - return n, pat - } - } - return -1, nil - } - panic("unmatched type") -} - -func (p *pattern) String() string { - if p.t&patternOption != 0 { - return fmt.Sprintf("%s(%s, %s, %d, %+v)", p.t, p.short, p.long, p.argcount, p.value) - } else if p.t&patternLeaf != 0 { - return fmt.Sprintf("%s(%s, %+v)", p.t, p.name, p.value) - } else if p.t&patternBranch != 0 { - result := "" - for i, child := range p.children { - if i > 0 { - result += ", " - } - result += child.String() - } - return fmt.Sprintf("%s(%s)", p.t, result) - } - panic("unmatched type") -} - -func (p *pattern) transform() *pattern { - /* - Expand pattern into an (almost) equivalent one, but with single Either. - - Example: ((-a | -b) (-c | -d)) => (-a -c | -a -d | -b -c | -b -d) - Quirks: [-a] => (-a), (-a...) => (-a -a) - */ - result := []patternList{} - groups := []patternList{patternList{p}} - parents := patternRequired + - patternOptionAL + - patternOptionSSHORTCUT + - patternEither + - patternOneOrMore - for len(groups) > 0 { - children := groups[0] - groups = groups[1:] - var child *pattern - for _, c := range children { - if c.t&parents != 0 { - child = c - break - } - } - if child != nil { - children.remove(child) - if child.t&patternEither != 0 { - for _, c := range child.children { - r := patternList{} - r = append(r, c) - r = append(r, children...) - groups = append(groups, r) - } - } else if child.t&patternOneOrMore != 0 { - r := patternList{} - r = append(r, child.children.double()...) - r = append(r, children...) - groups = append(groups, r) - } else { - r := patternList{} - r = append(r, child.children...) - r = append(r, children...) - groups = append(groups, r) - } - } else { - result = append(result, children) - } - } - either := patternList{} - for _, e := range result { - either = append(either, newRequired(e...)) - } - return newEither(either...) 
-} - -func (p *pattern) eq(other *pattern) bool { - return reflect.DeepEqual(p, other) -} - -func (pl patternList) unique() patternList { - table := make(map[string]bool) - result := patternList{} - for _, v := range pl { - if !table[v.String()] { - table[v.String()] = true - result = append(result, v) - } - } - return result -} - -func (pl patternList) index(p *pattern) (int, error) { - for i, c := range pl { - if c.eq(p) { - return i, nil - } - } - return -1, newError("%s not in list", p) -} - -func (pl patternList) count(p *pattern) int { - count := 0 - for _, c := range pl { - if c.eq(p) { - count++ - } - } - return count -} - -func (pl patternList) diff(l patternList) patternList { - lAlt := make(patternList, len(l)) - copy(lAlt, l) - result := make(patternList, 0, len(pl)) - for _, v := range pl { - if v != nil { - match := false - for i, w := range lAlt { - if w.eq(v) { - match = true - lAlt[i] = nil - break - } - } - if match == false { - result = append(result, v) - } - } - } - return result -} - -func (pl patternList) double() patternList { - l := len(pl) - result := make(patternList, l*2) - copy(result, pl) - copy(result[l:2*l], pl) - return result -} - -func (pl *patternList) remove(p *pattern) { - (*pl) = pl.diff(patternList{p}) -} - -func (pl patternList) dictionary() map[string]interface{} { - dict := make(map[string]interface{}) - for _, a := range pl { - dict[a.name] = a.value - } - return dict -} - -func stringPartition(s, sep string) (string, string, string) { - sepPos := strings.Index(s, sep) - if sepPos == -1 { // no seperator found - return s, "", "" - } - split := strings.SplitN(s, sep, 2) - return split[0], sep, split[1] -} - -// returns true if all cased characters in the string are uppercase -// and there are there is at least one cased charcter -func isStringUppercase(s string) bool { - if strings.ToUpper(s) != s { - return false - } - for _, c := range []rune(s) { - if unicode.IsUpper(c) { - return true - } - } - return false -} diff --git a/vendor/github.com/docopt/docopt-go/test_golang.docopt b/vendor/github.com/docopt/docopt-go/test_golang.docopt deleted file mode 100644 index 323fd67..0000000 --- a/vendor/github.com/docopt/docopt-go/test_golang.docopt +++ /dev/null @@ -1,9 +0,0 @@ -r"""usage: prog [NAME_-2]...""" -$ prog 10 20 -{"NAME_-2": ["10", "20"]} - -$ prog 10 -{"NAME_-2": ["10"]} - -$ prog -{"NAME_-2": []} diff --git a/vendor/github.com/docopt/docopt-go/testcases.docopt b/vendor/github.com/docopt/docopt-go/testcases.docopt deleted file mode 100644 index efe9a07..0000000 --- a/vendor/github.com/docopt/docopt-go/testcases.docopt +++ /dev/null @@ -1,957 +0,0 @@ -r"""Usage: prog - -""" -$ prog -{} - -$ prog --xxx -"user-error" - - -r"""Usage: prog [options] - -Options: -a All. - -""" -$ prog -{"-a": false} - -$ prog -a -{"-a": true} - -$ prog -x -"user-error" - - -r"""Usage: prog [options] - -Options: --all All. - -""" -$ prog -{"--all": false} - -$ prog --all -{"--all": true} - -$ prog --xxx -"user-error" - - -r"""Usage: prog [options] - -Options: -v, --verbose Verbose. 
- -""" -$ prog --verbose -{"--verbose": true} - -$ prog --ver -{"--verbose": true} - -$ prog -v -{"--verbose": true} - - -r"""Usage: prog [options] - -Options: -p PATH - -""" -$ prog -p home/ -{"-p": "home/"} - -$ prog -phome/ -{"-p": "home/"} - -$ prog -p -"user-error" - - -r"""Usage: prog [options] - -Options: --path - -""" -$ prog --path home/ -{"--path": "home/"} - -$ prog --path=home/ -{"--path": "home/"} - -$ prog --pa home/ -{"--path": "home/"} - -$ prog --pa=home/ -{"--path": "home/"} - -$ prog --path -"user-error" - - -r"""Usage: prog [options] - -Options: -p PATH, --path= Path to files. - -""" -$ prog -proot -{"--path": "root"} - - -r"""Usage: prog [options] - -Options: -p --path PATH Path to files. - -""" -$ prog -p root -{"--path": "root"} - -$ prog --path root -{"--path": "root"} - - -r"""Usage: prog [options] - -Options: - -p PATH Path to files [default: ./] - -""" -$ prog -{"-p": "./"} - -$ prog -phome -{"-p": "home"} - - -r"""UsAgE: prog [options] - -OpTiOnS: --path= Path to files - [dEfAuLt: /root] - -""" -$ prog -{"--path": "/root"} - -$ prog --path=home -{"--path": "home"} - - -r"""usage: prog [options] - -options: - -a Add - -r Remote - -m Message - -""" -$ prog -a -r -m Hello -{"-a": true, - "-r": true, - "-m": "Hello"} - -$ prog -armyourass -{"-a": true, - "-r": true, - "-m": "yourass"} - -$ prog -a -r -{"-a": true, - "-r": true, - "-m": null} - - -r"""Usage: prog [options] - -Options: --version - --verbose - -""" -$ prog --version -{"--version": true, - "--verbose": false} - -$ prog --verbose -{"--version": false, - "--verbose": true} - -$ prog --ver -"user-error" - -$ prog --verb -{"--version": false, - "--verbose": true} - - -r"""usage: prog [-a -r -m ] - -options: - -a Add - -r Remote - -m Message - -""" -$ prog -armyourass -{"-a": true, - "-r": true, - "-m": "yourass"} - - -r"""usage: prog [-armmsg] - -options: -a Add - -r Remote - -m Message - -""" -$ prog -a -r -m Hello -{"-a": true, - "-r": true, - "-m": "Hello"} - - -r"""usage: prog -a -b - -options: - -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -"user-error" - - -r"""usage: prog (-a -b) - -options: -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -"user-error" - - -r"""usage: prog [-a] -b - -options: -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -b -{"-a": false, "-b": true} - -$ prog -"user-error" - - -r"""usage: prog [(-a -b)] - -options: -a - -b - -""" -$ prog -a -b -{"-a": true, "-b": true} - -$ prog -b -a -{"-a": true, "-b": true} - -$ prog -a -"user-error" - -$ prog -b -"user-error" - -$ prog -{"-a": false, "-b": false} - - -r"""usage: prog (-a|-b) - -options: -a - -b - -""" -$ prog -a -b -"user-error" - -$ prog -"user-error" - -$ prog -a -{"-a": true, "-b": false} - -$ prog -b -{"-a": false, "-b": true} - - -r"""usage: prog [ -a | -b ] - -options: -a - -b - -""" -$ prog -a -b -"user-error" - -$ prog -{"-a": false, "-b": false} - -$ prog -a -{"-a": true, "-b": false} - -$ prog -b -{"-a": false, "-b": true} - - -r"""usage: prog """ -$ prog 10 -{"": "10"} - -$ prog 10 20 -"user-error" - -$ prog -"user-error" - - -r"""usage: prog []""" -$ prog 10 -{"": "10"} - -$ prog 10 20 -"user-error" - -$ prog -{"": null} - - -r"""usage: prog """ -$ prog 10 20 40 -{"": "10", "": "20", "": "40"} - -$ prog 10 20 -"user-error" - -$ prog -"user-error" - - 
-r"""usage: prog [ ]""" -$ prog 10 20 40 -{"": "10", "": "20", "": "40"} - -$ prog 10 20 -{"": "10", "": "20", "": null} - -$ prog -"user-error" - - -r"""usage: prog [ | ]""" -$ prog 10 20 40 -"user-error" - -$ prog 20 40 -{"": null, "": "20", "": "40"} - -$ prog -{"": null, "": null, "": null} - - -r"""usage: prog ( --all | ) - -options: - --all - -""" -$ prog 10 --all -{"": "10", "--all": true, "": null} - -$ prog 10 -{"": null, "--all": false, "": "10"} - -$ prog -"user-error" - - -r"""usage: prog [ ]""" -$ prog 10 20 -{"": ["10", "20"]} - -$ prog 10 -{"": ["10"]} - -$ prog -{"": []} - - -r"""usage: prog [( )]""" -$ prog 10 20 -{"": ["10", "20"]} - -$ prog 10 -"user-error" - -$ prog -{"": []} - - -r"""usage: prog NAME...""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -"user-error" - - -r"""usage: prog [NAME]...""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -{"NAME": []} - - -r"""usage: prog [NAME...]""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -{"NAME": []} - - -r"""usage: prog [NAME [NAME ...]]""" -$ prog 10 20 -{"NAME": ["10", "20"]} - -$ prog 10 -{"NAME": ["10"]} - -$ prog -{"NAME": []} - - -r"""usage: prog (NAME | --foo NAME) - -options: --foo - -""" -$ prog 10 -{"NAME": "10", "--foo": false} - -$ prog --foo 10 -{"NAME": "10", "--foo": true} - -$ prog --foo=10 -"user-error" - - -r"""usage: prog (NAME | --foo) [--bar | NAME] - -options: --foo -options: --bar - -""" -$ prog 10 -{"NAME": ["10"], "--foo": false, "--bar": false} - -$ prog 10 20 -{"NAME": ["10", "20"], "--foo": false, "--bar": false} - -$ prog --foo --bar -{"NAME": [], "--foo": true, "--bar": true} - - -r"""Naval Fate. - -Usage: - prog ship new ... - prog ship [] move [--speed=] - prog ship shoot - prog mine (set|remove) [--moored|--drifting] - prog -h | --help - prog --version - -Options: - -h --help Show this screen. - --version Show version. - --speed= Speed in knots [default: 10]. - --moored Mored (anchored) mine. - --drifting Drifting mine. - -""" -$ prog ship Guardian move 150 300 --speed=20 -{"--drifting": false, - "--help": false, - "--moored": false, - "--speed": "20", - "--version": false, - "": ["Guardian"], - "": "150", - "": "300", - "mine": false, - "move": true, - "new": false, - "remove": false, - "set": false, - "ship": true, - "shoot": false} - - -r"""usage: prog --hello""" -$ prog --hello -{"--hello": true} - - -r"""usage: prog [--hello=]""" -$ prog -{"--hello": null} - -$ prog --hello wrld -{"--hello": "wrld"} - - -r"""usage: prog [-o]""" -$ prog -{"-o": false} - -$ prog -o -{"-o": true} - - -r"""usage: prog [-opr]""" -$ prog -op -{"-o": true, "-p": true, "-r": false} - - -r"""usage: prog --aabb | --aa""" -$ prog --aa -{"--aabb": false, "--aa": true} - -$ prog --a -"user-error" # not a unique prefix - -# -# Counting number of flags -# - -r"""Usage: prog -v""" -$ prog -v -{"-v": true} - - -r"""Usage: prog [-v -v]""" -$ prog -{"-v": 0} - -$ prog -v -{"-v": 1} - -$ prog -vv -{"-v": 2} - - -r"""Usage: prog -v ...""" -$ prog -"user-error" - -$ prog -v -{"-v": 1} - -$ prog -vv -{"-v": 2} - -$ prog -vvvvvv -{"-v": 6} - - -r"""Usage: prog [-v | -vv | -vvv] - -This one is probably most readable user-friednly variant. 
- -""" -$ prog -{"-v": 0} - -$ prog -v -{"-v": 1} - -$ prog -vv -{"-v": 2} - -$ prog -vvvv -"user-error" - - -r"""usage: prog [--ver --ver]""" -$ prog --ver --ver -{"--ver": 2} - - -# -# Counting commands -# - -r"""usage: prog [go]""" -$ prog go -{"go": true} - - -r"""usage: prog [go go]""" -$ prog -{"go": 0} - -$ prog go -{"go": 1} - -$ prog go go -{"go": 2} - -$ prog go go go -"user-error" - -r"""usage: prog go...""" -$ prog go go go go go -{"go": 5} - -# -# [options] does not include options from usage-pattern -# -r"""usage: prog [options] [-a] - -options: -a - -b -""" -$ prog -a -{"-a": true, "-b": false} - -$ prog -aa -"user-error" - -# -# Test [options] shourtcut -# - -r"""Usage: prog [options] A -Options: - -q Be quiet - -v Be verbose. - -""" -$ prog arg -{"A": "arg", "-v": false, "-q": false} - -$ prog -v arg -{"A": "arg", "-v": true, "-q": false} - -$ prog -q arg -{"A": "arg", "-v": false, "-q": true} - -# -# Test single dash -# - -r"""usage: prog [-]""" - -$ prog - -{"-": true} - -$ prog -{"-": false} - -# -# If argument is repeated, its value should always be a list -# - -r"""usage: prog [NAME [NAME ...]]""" - -$ prog a b -{"NAME": ["a", "b"]} - -$ prog -{"NAME": []} - -# -# Option's argument defaults to null/None -# - -r"""usage: prog [options] -options: - -a Add - -m Message - -""" -$ prog -a -{"-m": null, "-a": true} - -# -# Test options without description -# - -r"""usage: prog --hello""" -$ prog --hello -{"--hello": true} - -r"""usage: prog [--hello=]""" -$ prog -{"--hello": null} - -$ prog --hello wrld -{"--hello": "wrld"} - -r"""usage: prog [-o]""" -$ prog -{"-o": false} - -$ prog -o -{"-o": true} - -r"""usage: prog [-opr]""" -$ prog -op -{"-o": true, "-p": true, "-r": false} - -r"""usage: git [-v | --verbose]""" -$ prog -v -{"-v": true, "--verbose": false} - -r"""usage: git remote [-v | --verbose]""" -$ prog remote -v -{"remote": true, "-v": true, "--verbose": false} - -# -# Test empty usage pattern -# - -r"""usage: prog""" -$ prog -{} - -r"""usage: prog - prog -""" -$ prog 1 2 -{"": "1", "": "2"} - -$ prog -{"": null, "": null} - -r"""usage: prog - prog -""" -$ prog -{"": null, "": null} - -# -# Option's argument should not capture default value from usage pattern -# - -r"""usage: prog [--file=]""" -$ prog -{"--file": null} - -r"""usage: prog [--file=] - -options: --file - -""" -$ prog -{"--file": null} - -r"""Usage: prog [-a ] - -Options: -a, --address TCP address [default: localhost:6283]. - -""" -$ prog -{"--address": "localhost:6283"} - -# -# If option with argument could be repeated, -# its arguments should be accumulated into a list -# - -r"""usage: prog --long= ...""" - -$ prog --long one -{"--long": ["one"]} - -$ prog --long one --long two -{"--long": ["one", "two"]} - -# -# Test multiple elements repeated at once -# - -r"""usage: prog (go --speed=)...""" -$ prog go left --speed=5 go right --speed=9 -{"go": 2, "": ["left", "right"], "--speed": ["5", "9"]} - -# -# Required options should work with option shortcut -# - -r"""usage: prog [options] -a - -options: -a - -""" -$ prog -a -{"-a": true} - -# -# If option could be repeated its defaults should be split into a list -# - -r"""usage: prog [-o ]... - -options: -o [default: x] - -""" -$ prog -o this -o that -{"-o": ["this", "that"]} - -$ prog -{"-o": ["x"]} - -r"""usage: prog [-o ]... 
- -options: -o [default: x y] - -""" -$ prog -o this -{"-o": ["this"]} - -$ prog -{"-o": ["x", "y"]} - -# -# Test stacked option's argument -# - -r"""usage: prog -pPATH - -options: -p PATH - -""" -$ prog -pHOME -{"-p": "HOME"} - -# -# Issue 56: Repeated mutually exclusive args give nested lists sometimes -# - -r"""Usage: foo (--xx=x|--yy=y)...""" -$ prog --xx=1 --yy=2 -{"--xx": ["1"], "--yy": ["2"]} - -# -# POSIXly correct tokenization -# - -r"""usage: prog []""" -$ prog f.txt -{"": "f.txt"} - -r"""usage: prog [--input=]...""" -$ prog --input a.txt --input=b.txt -{"--input": ["a.txt", "b.txt"]} - -# -# Issue 85: `[options]` shourtcut with multiple subcommands -# - -r"""usage: prog good [options] - prog fail [options] - -options: --loglevel=N - -""" -$ prog fail --loglevel 5 -{"--loglevel": "5", "fail": true, "good": false} - -# -# Usage-section syntax -# - -r"""usage:prog --foo""" -$ prog --foo -{"--foo": true} - -r"""PROGRAM USAGE: prog --foo""" -$ prog --foo -{"--foo": true} - -r"""Usage: prog --foo - prog --bar -NOT PART OF SECTION""" -$ prog --foo -{"--foo": true, "--bar": false} - -r"""Usage: - prog --foo - prog --bar - -NOT PART OF SECTION""" -$ prog --foo -{"--foo": true, "--bar": false} - -r"""Usage: - prog --foo - prog --bar -NOT PART OF SECTION""" -$ prog --foo -{"--foo": true, "--bar": false} - -# -# Options-section syntax -# - -r"""Usage: prog [options] - -global options: --foo -local options: --baz - --bar -other options: - --egg - --spam --not-an-option- - -""" -$ prog --baz --egg -{"--foo": false, "--baz": true, "--bar": false, "--egg": true, "--spam": false} diff --git a/vendor/github.com/fatedier/beego/LICENSE b/vendor/github.com/fatedier/beego/LICENSE deleted file mode 100644 index 5dbd424..0000000 --- a/vendor/github.com/fatedier/beego/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 astaxie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/fatedier/beego/logs/README.md b/vendor/github.com/fatedier/beego/logs/README.md deleted file mode 100644 index 57d7abc..0000000 --- a/vendor/github.com/fatedier/beego/logs/README.md +++ /dev/null @@ -1,63 +0,0 @@ -## logs -logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` . - - -## How to install? - - go get github.com/astaxie/beego/logs - - -## What adapters are supported? - -As of now this logs support console, file,smtp and conn. - - -## How to use it? 
- -First you must import it - - import ( - "github.com/astaxie/beego/logs" - ) - -Then init a Log (example with console adapter) - - log := NewLogger(10000) - log.SetLogger("console", "") - -> the first params stand for how many channel - -Use it like this: - - log.Trace("trace") - log.Info("info") - log.Warn("warning") - log.Debug("debug") - log.Critical("critical") - - -## File adapter - -Configure file adapter like this: - - log := NewLogger(10000) - log.SetLogger("file", `{"filename":"test.log"}`) - - -## Conn adapter - -Configure like this: - - log := NewLogger(1000) - log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`) - log.Info("info") - - -## Smtp adapter - -Configure like this: - - log := NewLogger(10000) - log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`) - log.Critical("sendmail critical") - time.Sleep(time.Second * 30) diff --git a/vendor/github.com/fatedier/beego/logs/color.go b/vendor/github.com/fatedier/beego/logs/color.go deleted file mode 100644 index 41d2363..0000000 --- a/vendor/github.com/fatedier/beego/logs/color.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package logs - -import "io" - -type ansiColorWriter struct { - w io.Writer - mode outputMode -} - -func (cw *ansiColorWriter) Write(p []byte) (int, error) { - return cw.w.Write(p) -} diff --git a/vendor/github.com/fatedier/beego/logs/color_windows.go b/vendor/github.com/fatedier/beego/logs/color_windows.go deleted file mode 100644 index deee4c8..0000000 --- a/vendor/github.com/fatedier/beego/logs/color_windows.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build windows - -package logs - -import ( - "bytes" - "io" - "strings" - "syscall" - "unsafe" -) - -type ( - csiState int - parseResult int -) - -const ( - outsideCsiCode csiState = iota - firstCsiCode - secondCsiCode -) - -const ( - noConsole parseResult = iota - changedColor - unknown -) - -type ansiColorWriter struct { - w io.Writer - mode outputMode - state csiState - paramStartBuf bytes.Buffer - paramBuf bytes.Buffer -} - -const ( - firstCsiChar byte = '\x1b' - secondeCsiChar byte = '[' - separatorChar byte = ';' - sgrCode byte = 'm' -) - -const ( - foregroundBlue = uint16(0x0001) - foregroundGreen = uint16(0x0002) - foregroundRed = uint16(0x0004) - foregroundIntensity = uint16(0x0008) - backgroundBlue = uint16(0x0010) - backgroundGreen = uint16(0x0020) - backgroundRed = uint16(0x0040) - backgroundIntensity = uint16(0x0080) - underscore = uint16(0x8000) - - foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity - backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity -) - -const ( - ansiReset = "0" - ansiIntensityOn = "1" - ansiIntensityOff = "21" - ansiUnderlineOn = "4" - ansiUnderlineOff = "24" - ansiBlinkOn = "5" - ansiBlinkOff = "25" - - ansiForegroundBlack = "30" - ansiForegroundRed = "31" - ansiForegroundGreen = "32" - ansiForegroundYellow = "33" - ansiForegroundBlue = "34" - ansiForegroundMagenta = "35" - ansiForegroundCyan = "36" - ansiForegroundWhite = "37" - ansiForegroundDefault = "39" - - ansiBackgroundBlack = "40" - ansiBackgroundRed = "41" - ansiBackgroundGreen = "42" - ansiBackgroundYellow = "43" - ansiBackgroundBlue = "44" - ansiBackgroundMagenta = "45" - ansiBackgroundCyan = "46" - ansiBackgroundWhite = "47" - ansiBackgroundDefault = "49" - - ansiLightForegroundGray = "90" - ansiLightForegroundRed = "91" - ansiLightForegroundGreen = "92" - ansiLightForegroundYellow = "93" - ansiLightForegroundBlue = "94" - ansiLightForegroundMagenta = "95" - ansiLightForegroundCyan = "96" - ansiLightForegroundWhite = "97" - - ansiLightBackgroundGray = "100" - ansiLightBackgroundRed = "101" - ansiLightBackgroundGreen = "102" - ansiLightBackgroundYellow = "103" - ansiLightBackgroundBlue = "104" - ansiLightBackgroundMagenta = "105" - ansiLightBackgroundCyan = "106" - ansiLightBackgroundWhite = "107" -) - -type drawType int - -const ( - foreground drawType = iota - background -) - -type winColor struct { - code uint16 - drawType drawType -} - -var colorMap = map[string]winColor{ - ansiForegroundBlack: {0, foreground}, - ansiForegroundRed: {foregroundRed, foreground}, - ansiForegroundGreen: {foregroundGreen, foreground}, - ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground}, - ansiForegroundBlue: {foregroundBlue, foreground}, - ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground}, - ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground}, - ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, - ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, - - ansiBackgroundBlack: {0, background}, - ansiBackgroundRed: {backgroundRed, background}, - ansiBackgroundGreen: {backgroundGreen, background}, - ansiBackgroundYellow: {backgroundRed | backgroundGreen, background}, - ansiBackgroundBlue: {backgroundBlue, background}, - ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background}, - ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background}, - ansiBackgroundWhite: {backgroundRed | backgroundGreen | 
backgroundBlue, background}, - ansiBackgroundDefault: {0, background}, - - ansiLightForegroundGray: {foregroundIntensity, foreground}, - ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground}, - ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground}, - ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground}, - ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground}, - ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground}, - ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground}, - ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground}, - - ansiLightBackgroundGray: {backgroundIntensity, background}, - ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background}, - ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background}, - ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background}, - ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background}, - ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background}, - ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background}, - ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background}, -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - defaultAttr *textAttributes -) - -func init() { - screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) - if screenInfo != nil { - colorMap[ansiForegroundDefault] = winColor{ - screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue), - foreground, - } - colorMap[ansiBackgroundDefault] = winColor{ - screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue), - background, - } - defaultAttr = convertTextAttr(screenInfo.WAttributes) - } -} - -type coord struct { - X, Y int16 -} - -type smallRect struct { - Left, Top, Right, Bottom int16 -} - -type consoleScreenBufferInfo struct { - DwSize coord - DwCursorPosition coord - WAttributes uint16 - SrWindow smallRect - DwMaximumWindowSize coord -} - -func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo { - var csbi consoleScreenBufferInfo - ret, _, _ := procGetConsoleScreenBufferInfo.Call( - hConsoleOutput, - uintptr(unsafe.Pointer(&csbi))) - if ret == 0 { - return nil - } - return &csbi -} - -func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool { - ret, _, _ := procSetConsoleTextAttribute.Call( - hConsoleOutput, - uintptr(wAttributes)) - return ret != 0 -} - -type textAttributes struct { - foregroundColor uint16 - backgroundColor uint16 - foregroundIntensity uint16 - backgroundIntensity uint16 - underscore uint16 - otherAttributes uint16 -} - -func convertTextAttr(winAttr uint16) *textAttributes { - fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue) - bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue) - fgIntensity := winAttr & foregroundIntensity - bgIntensity := winAttr & backgroundIntensity - underline := winAttr & underscore - otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore) - return 
&textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes} -} - -func convertWinAttr(textAttr *textAttributes) uint16 { - var winAttr uint16 - winAttr |= textAttr.foregroundColor - winAttr |= textAttr.backgroundColor - winAttr |= textAttr.foregroundIntensity - winAttr |= textAttr.backgroundIntensity - winAttr |= textAttr.underscore - winAttr |= textAttr.otherAttributes - return winAttr -} - -func changeColor(param []byte) parseResult { - screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) - if screenInfo == nil { - return noConsole - } - - winAttr := convertTextAttr(screenInfo.WAttributes) - strParam := string(param) - if len(strParam) <= 0 { - strParam = "0" - } - csiParam := strings.Split(strParam, string(separatorChar)) - for _, p := range csiParam { - c, ok := colorMap[p] - switch { - case !ok: - switch p { - case ansiReset: - winAttr.foregroundColor = defaultAttr.foregroundColor - winAttr.backgroundColor = defaultAttr.backgroundColor - winAttr.foregroundIntensity = defaultAttr.foregroundIntensity - winAttr.backgroundIntensity = defaultAttr.backgroundIntensity - winAttr.underscore = 0 - winAttr.otherAttributes = 0 - case ansiIntensityOn: - winAttr.foregroundIntensity = foregroundIntensity - case ansiIntensityOff: - winAttr.foregroundIntensity = 0 - case ansiUnderlineOn: - winAttr.underscore = underscore - case ansiUnderlineOff: - winAttr.underscore = 0 - case ansiBlinkOn: - winAttr.backgroundIntensity = backgroundIntensity - case ansiBlinkOff: - winAttr.backgroundIntensity = 0 - default: - // unknown code - } - case c.drawType == foreground: - winAttr.foregroundColor = c.code - case c.drawType == background: - winAttr.backgroundColor = c.code - } - } - winTextAttribute := convertWinAttr(winAttr) - setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute) - - return changedColor -} - -func parseEscapeSequence(command byte, param []byte) parseResult { - if defaultAttr == nil { - return noConsole - } - - switch command { - case sgrCode: - return changeColor(param) - default: - return unknown - } -} - -func (cw *ansiColorWriter) flushBuffer() (int, error) { - return cw.flushTo(cw.w) -} - -func (cw *ansiColorWriter) resetBuffer() (int, error) { - return cw.flushTo(nil) -} - -func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) { - var n1, n2 int - var err error - - startBytes := cw.paramStartBuf.Bytes() - cw.paramStartBuf.Reset() - if w != nil { - n1, err = cw.w.Write(startBytes) - if err != nil { - return n1, err - } - } else { - n1 = len(startBytes) - } - paramBytes := cw.paramBuf.Bytes() - cw.paramBuf.Reset() - if w != nil { - n2, err = cw.w.Write(paramBytes) - if err != nil { - return n1 + n2, err - } - } else { - n2 = len(paramBytes) - } - return n1 + n2, nil -} - -func isParameterChar(b byte) bool { - return ('0' <= b && b <= '9') || b == separatorChar -} - -func (cw *ansiColorWriter) Write(p []byte) (int, error) { - r, nw, first, last := 0, 0, 0, 0 - if cw.mode != DiscardNonColorEscSeq { - cw.state = outsideCsiCode - cw.resetBuffer() - } - - var err error - for i, ch := range p { - switch cw.state { - case outsideCsiCode: - if ch == firstCsiChar { - cw.paramStartBuf.WriteByte(ch) - cw.state = firstCsiCode - } - case firstCsiCode: - switch ch { - case firstCsiChar: - cw.paramStartBuf.WriteByte(ch) - break - case secondeCsiChar: - cw.paramStartBuf.WriteByte(ch) - cw.state = secondCsiCode - last = i - 1 - default: - cw.resetBuffer() - cw.state = outsideCsiCode - } - case secondCsiCode: - if isParameterChar(ch) { - 
cw.paramBuf.WriteByte(ch) - } else { - nw, err = cw.w.Write(p[first:last]) - r += nw - if err != nil { - return r, err - } - first = i + 1 - result := parseEscapeSequence(ch, cw.paramBuf.Bytes()) - if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) { - cw.paramBuf.WriteByte(ch) - nw, err := cw.flushBuffer() - if err != nil { - return r, err - } - r += nw - } else { - n, _ := cw.resetBuffer() - // Add one more to the size of the buffer for the last ch - r += n + 1 - } - - cw.state = outsideCsiCode - } - default: - cw.state = outsideCsiCode - } - } - - if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode { - nw, err = cw.w.Write(p[first:]) - r += nw - } - - return r, err -} diff --git a/vendor/github.com/fatedier/beego/logs/conn.go b/vendor/github.com/fatedier/beego/logs/conn.go deleted file mode 100644 index 6d5bf6b..0000000 --- a/vendor/github.com/fatedier/beego/logs/conn.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "encoding/json" - "io" - "net" - "time" -) - -// connWriter implements LoggerInterface. -// it writes messages in keep-live tcp connection. -type connWriter struct { - lg *logWriter - innerWriter io.WriteCloser - ReconnectOnMsg bool `json:"reconnectOnMsg"` - Reconnect bool `json:"reconnect"` - Net string `json:"net"` - Addr string `json:"addr"` - Level int `json:"level"` -} - -// NewConn create new ConnWrite returning as LoggerInterface. -func NewConn() Logger { - conn := new(connWriter) - conn.Level = LevelTrace - return conn -} - -// Init init connection writer with json config. -// json config only need key "level". -func (c *connWriter) Init(jsonConfig string) error { - return json.Unmarshal([]byte(jsonConfig), c) -} - -// WriteMsg write message in connection. -// if connection is down, try to re-connect. -func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > c.Level { - return nil - } - if c.needToConnectOnMsg() { - err := c.connect() - if err != nil { - return err - } - } - - if c.ReconnectOnMsg { - defer c.innerWriter.Close() - } - - c.lg.println(when, msg) - return nil -} - -// Flush implementing method. empty. -func (c *connWriter) Flush() { - -} - -// Destroy destroy connection writer and close tcp listener. 
-func (c *connWriter) Destroy() { - if c.innerWriter != nil { - c.innerWriter.Close() - } -} - -func (c *connWriter) connect() error { - if c.innerWriter != nil { - c.innerWriter.Close() - c.innerWriter = nil - } - - conn, err := net.Dial(c.Net, c.Addr) - if err != nil { - return err - } - - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - } - - c.innerWriter = conn - c.lg = newLogWriter(conn) - return nil -} - -func (c *connWriter) needToConnectOnMsg() bool { - if c.Reconnect { - c.Reconnect = false - return true - } - - if c.innerWriter == nil { - return true - } - - return c.ReconnectOnMsg -} - -func init() { - Register(AdapterConn, NewConn) -} diff --git a/vendor/github.com/fatedier/beego/logs/console.go b/vendor/github.com/fatedier/beego/logs/console.go deleted file mode 100644 index e6bf6c2..0000000 --- a/vendor/github.com/fatedier/beego/logs/console.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "encoding/json" - "os" - "runtime" - "time" -) - -// brush is a color join function -type brush func(string) string - -// newBrush return a fix color Brush -func newBrush(color string) brush { - pre := "\033[" - reset := "\033[0m" - return func(text string) string { - return pre + color + "m" + text + reset - } -} - -var colors = []brush{ - newBrush("1;37"), // Emergency white - newBrush("1;36"), // Alert cyan - newBrush("1;35"), // Critical magenta - newBrush("1;31"), // Error red - newBrush("1;33"), // Warning yellow - newBrush("1;32"), // Notice green - newBrush("1;34"), // Informational blue - newBrush("1;34"), // Debug blue -} - -// consoleWriter implements LoggerInterface and writes messages to terminal. -type consoleWriter struct { - lg *logWriter - Level int `json:"level"` - Colorful bool `json:"color"` //this filed is useful only when system's terminal supports color -} - -// NewConsole create ConsoleWriter returning as LoggerInterface. -func NewConsole() Logger { - cw := &consoleWriter{ - lg: newLogWriter(os.Stdout), - Level: LevelDebug, - Colorful: runtime.GOOS != "windows", - } - return cw -} - -// Init init console logger. -// jsonConfig like '{"level":LevelTrace}'. -func (c *consoleWriter) Init(jsonConfig string) error { - if len(jsonConfig) == 0 { - return nil - } - err := json.Unmarshal([]byte(jsonConfig), c) - if runtime.GOOS == "windows" { - c.Colorful = false - } - return err -} - -// WriteMsg write message in console. -func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > c.Level { - return nil - } - if c.Colorful { - msg = colors[level](msg) - } - c.lg.println(when, msg) - return nil -} - -// Destroy implementing method. empty. -func (c *consoleWriter) Destroy() { - -} - -// Flush implementing method. empty. 
-func (c *consoleWriter) Flush() { - -} - -func init() { - Register(AdapterConsole, NewConsole) -} diff --git a/vendor/github.com/fatedier/beego/logs/file.go b/vendor/github.com/fatedier/beego/logs/file.go deleted file mode 100644 index a727652..0000000 --- a/vendor/github.com/fatedier/beego/logs/file.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" -) - -// fileLogWriter implements LoggerInterface. -// It writes messages by lines limit, file size limit, or time frequency. -type fileLogWriter struct { - sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize - // The opened file - Filename string `json:"filename"` - fileWriter *os.File - - // Rotate at line - MaxLines int `json:"maxlines"` - maxLinesCurLines int - - // Rotate at size - MaxSize int `json:"maxsize"` - maxSizeCurSize int - - // Rotate daily - Daily bool `json:"daily"` - MaxDays int64 `json:"maxdays"` - dailyOpenDate int - dailyOpenTime time.Time - - Rotate bool `json:"rotate"` - - Level int `json:"level"` - - Perm string `json:"perm"` - - fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix -} - -// newFileWriter create a FileLogWriter returning as LoggerInterface. -func newFileWriter() Logger { - w := &fileLogWriter{ - Daily: true, - MaxDays: 7, - Rotate: true, - Level: LevelTrace, - Perm: "0660", - } - return w -} - -// Init file logger with json config. -// jsonConfig like: -// { -// "filename":"logs/beego.log", -// "maxLines":10000, -// "maxsize":1024, -// "daily":true, -// "maxDays":15, -// "rotate":true, -// "perm":"0600" -// } -func (w *fileLogWriter) Init(jsonConfig string) error { - err := json.Unmarshal([]byte(jsonConfig), w) - if err != nil { - return err - } - if len(w.Filename) == 0 { - return errors.New("jsonconfig must have filename") - } - w.suffix = filepath.Ext(w.Filename) - w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix) - if w.suffix == "" { - w.suffix = ".log" - } - err = w.startLogger() - return err -} - -// start file logger. create log file and set to locker-inside file writer. -func (w *fileLogWriter) startLogger() error { - file, err := w.createLogFile() - if err != nil { - return err - } - if w.fileWriter != nil { - w.fileWriter.Close() - } - w.fileWriter = file - return w.initFd() -} - -func (w *fileLogWriter) needRotate(size int, day int) bool { - return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) || - (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) || - (w.Daily && day != w.dailyOpenDate) - -} - -// WriteMsg write logger message into file. 
-func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > w.Level { - return nil - } - h, d := formatTimeHeader(when) - msg = string(h) + msg + "\n" - if w.Rotate { - w.RLock() - if w.needRotate(len(msg), d) { - w.RUnlock() - w.Lock() - if w.needRotate(len(msg), d) { - if err := w.doRotate(when); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) - } - } - w.Unlock() - } else { - w.RUnlock() - } - } - - w.Lock() - _, err := w.fileWriter.Write([]byte(msg)) - if err == nil { - w.maxLinesCurLines++ - w.maxSizeCurSize += len(msg) - } - w.Unlock() - return err -} - -func (w *fileLogWriter) createLogFile() (*os.File, error) { - // Open the log file - perm, err := strconv.ParseInt(w.Perm, 8, 64) - if err != nil { - return nil, err - } - fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm)) - if err == nil { - // Make sure file perm is user set perm cause of `os.OpenFile` will obey umask - os.Chmod(w.Filename, os.FileMode(perm)) - } - return fd, err -} - -func (w *fileLogWriter) initFd() error { - fd := w.fileWriter - fInfo, err := fd.Stat() - if err != nil { - return fmt.Errorf("get stat err: %s\n", err) - } - w.maxSizeCurSize = int(fInfo.Size()) - w.dailyOpenTime = time.Now() - w.dailyOpenDate = w.dailyOpenTime.Day() - w.maxLinesCurLines = 0 - if w.Daily { - go w.dailyRotate(w.dailyOpenTime) - } - if fInfo.Size() > 0 { - count, err := w.lines() - if err != nil { - return err - } - w.maxLinesCurLines = count - } - return nil -} - -func (w *fileLogWriter) dailyRotate(openTime time.Time) { - y, m, d := openTime.Add(24 * time.Hour).Date() - nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location()) - tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100)) - select { - case <-tm.C: - w.Lock() - if w.needRotate(0, time.Now().Day()) { - if err := w.doRotate(time.Now()); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) - } - } - w.Unlock() - } -} - -func (w *fileLogWriter) lines() (int, error) { - fd, err := os.Open(w.Filename) - if err != nil { - return 0, err - } - defer fd.Close() - - buf := make([]byte, 32768) // 32k - count := 0 - lineSep := []byte{'\n'} - - for { - c, err := fd.Read(buf) - if err != nil && err != io.EOF { - return count, err - } - - count += bytes.Count(buf[:c], lineSep) - - if err == io.EOF { - break - } - } - - return count, nil -} - -// DoRotate means it need to write file in new file. 
-// new file name like xx.2013-01-01.log (daily) or xx.001.log (by line or size) -func (w *fileLogWriter) doRotate(logTime time.Time) error { - // file exists - // Find the next available number - num := 1 - fName := "" - - _, err := os.Lstat(w.Filename) - if err != nil { - //even if the file is not exist or other ,we should RESTART the logger - goto RESTART_LOGGER - } - - if w.MaxLines > 0 || w.MaxSize > 0 { - for ; err == nil && num <= 999; num++ { - fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format("2006-01-02"), num, w.suffix) - _, err = os.Lstat(fName) - } - } else { - fName = fmt.Sprintf("%s.%s%s", w.fileNameOnly, w.dailyOpenTime.Format("2006-01-02"), w.suffix) - _, err = os.Lstat(fName) - for ; err == nil && num <= 999; num++ { - fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", w.dailyOpenTime.Format("2006-01-02"), num, w.suffix) - _, err = os.Lstat(fName) - } - } - // return error if the last file checked still existed - if err == nil { - return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.Filename) - } - - // close fileWriter before rename - w.fileWriter.Close() - - // Rename the file to its new found name - // even if occurs error,we MUST guarantee to restart new logger - err = os.Rename(w.Filename, fName) - err = os.Chmod(fName, os.FileMode(0440)) - // re-start logger -RESTART_LOGGER: - - startLoggerErr := w.startLogger() - go w.deleteOldLog() - - if startLoggerErr != nil { - return fmt.Errorf("Rotate StartLogger: %s\n", startLoggerErr) - } - if err != nil { - return fmt.Errorf("Rotate: %s\n", err) - } - return nil - -} - -func (w *fileLogWriter) deleteOldLog() { - dir := filepath.Dir(w.Filename) - filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { - defer func() { - if r := recover(); r != nil { - fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r) - } - }() - - if info == nil { - return - } - - if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) { - if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) && - strings.HasSuffix(filepath.Base(path), w.suffix) { - os.Remove(path) - } - } - return - }) -} - -// Destroy close the file description, close file writer. -func (w *fileLogWriter) Destroy() { - w.fileWriter.Close() -} - -// Flush flush file logger. -// there are no buffering messages in file logger in memory. -// flush file means sync file from disk. -func (w *fileLogWriter) Flush() { - w.fileWriter.Sync() -} - -func init() { - Register(AdapterFile, newFileWriter) -} diff --git a/vendor/github.com/fatedier/beego/logs/jianliao.go b/vendor/github.com/fatedier/beego/logs/jianliao.go deleted file mode 100644 index 16773c9..0000000 --- a/vendor/github.com/fatedier/beego/logs/jianliao.go +++ /dev/null @@ -1,78 +0,0 @@ -package logs - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" -) - -// JLWriter implements beego LoggerInterface and is used to send jiaoliao webhook -type JLWriter struct { - AuthorName string `json:"authorname"` - Title string `json:"title"` - WebhookURL string `json:"webhookurl"` - RedirectURL string `json:"redirecturl,omitempty"` - ImageURL string `json:"imageurl,omitempty"` - Level int `json:"level"` -} - -// newJLWriter create jiaoliao writer. 
-func newJLWriter() Logger { - return &JLWriter{Level: LevelTrace} -} - -// Init JLWriter with json config string -func (s *JLWriter) Init(jsonconfig string) error { - err := json.Unmarshal([]byte(jsonconfig), s) - if err != nil { - return err - } - return nil -} - -// WriteMsg write message in smtp writer. -// it will send an email with subject and only this message. -func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > s.Level { - return nil - } - - text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg) - - form := url.Values{} - form.Add("authorName", s.AuthorName) - form.Add("title", s.Title) - form.Add("text", text) - if s.RedirectURL != "" { - form.Add("redirectUrl", s.RedirectURL) - } - if s.ImageURL != "" { - form.Add("imageUrl", s.ImageURL) - } - - resp, err := http.PostForm(s.WebhookURL, form) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) - } - return nil -} - -// Flush implementing method. empty. -func (s *JLWriter) Flush() { - return -} - -// Destroy implementing method. empty. -func (s *JLWriter) Destroy() { - return -} - -func init() { - Register(AdapterJianLiao, newJLWriter) -} diff --git a/vendor/github.com/fatedier/beego/logs/log.go b/vendor/github.com/fatedier/beego/logs/log.go deleted file mode 100644 index 54eaff7..0000000 --- a/vendor/github.com/fatedier/beego/logs/log.go +++ /dev/null @@ -1,657 +0,0 @@ -// Copyright 2012 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package logs provide a general log interface -// Usage: -// -// import "github.com/astaxie/beego/logs" -// -// log := NewLogger(10000) -// log.SetLogger("console", "") -// -// > the first params stand for how many channel -// -// Use it like this: -// -// log.Trace("trace") -// log.Info("info") -// log.Warn("warning") -// log.Debug("debug") -// log.Critical("critical") -// -// more docs http://beego.me/docs/module/logs.md -package logs - -import ( - "fmt" - "log" - "os" - "path" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -// RFC5424 log message levels. -const ( - LevelEmergency = iota - LevelAlert - LevelCritical - LevelError - LevelWarning - LevelNotice - LevelInformational - LevelDebug - LevelTrace -) - -// levelLogLogger is defined to implement log.Logger -// the real log level will be LevelEmergency -const levelLoggerImpl = -1 - -// Name for adapter with beego official support -const ( - AdapterConsole = "console" - AdapterFile = "file" - AdapterMultiFile = "multifile" - AdapterMail = "smtp" - AdapterConn = "conn" - AdapterEs = "es" - AdapterJianLiao = "jianliao" - AdapterSlack = "slack" - AdapterAliLS = "alils" -) - -// Legacy log level constants to ensure backwards compatibility. -const ( - LevelInfo = LevelInformational - LevelWarn = LevelWarning -) - -type newLoggerFunc func() Logger - -// Logger defines the behavior of a log provider. 
-type Logger interface { - Init(config string) error - WriteMsg(when time.Time, msg string, level int) error - Destroy() - Flush() -} - -var adapters = make(map[string]newLoggerFunc) -var levelPrefix = [LevelTrace + 1]string{"[M] ", "[A] ", "[C] ", "[E] ", "[W] ", "[N] ", "[I] ", "[D] ", "[T] "} - -// Register makes a log provide available by the provided name. -// If Register is called twice with the same name or if driver is nil, -// it panics. -func Register(name string, log newLoggerFunc) { - if log == nil { - panic("logs: Register provide is nil") - } - if _, dup := adapters[name]; dup { - panic("logs: Register called twice for provider " + name) - } - adapters[name] = log -} - -// BeeLogger is default logger in beego application. -// it can contain several providers and log message into all providers. -type BeeLogger struct { - lock sync.Mutex - level int - init bool - enableFuncCallDepth bool - loggerFuncCallDepth int - asynchronous bool - msgChanLen int64 - msgChan chan *logMsg - signalChan chan string - wg sync.WaitGroup - outputs []*nameLogger -} - -const defaultAsyncMsgLen = 1e3 - -type nameLogger struct { - Logger - name string -} - -type logMsg struct { - level int - msg string - when time.Time -} - -var logMsgPool *sync.Pool - -// NewLogger returns a new BeeLogger. -// channelLen means the number of messages in chan(used where asynchronous is true). -// if the buffering chan is full, logger adapters write to file or other way. -func NewLogger(channelLens ...int64) *BeeLogger { - bl := new(BeeLogger) - bl.level = LevelDebug - bl.loggerFuncCallDepth = 2 - bl.msgChanLen = append(channelLens, 0)[0] - if bl.msgChanLen <= 0 { - bl.msgChanLen = defaultAsyncMsgLen - } - bl.signalChan = make(chan string, 1) - bl.setLogger(AdapterConsole) - return bl -} - -// Async set the log to asynchronous and start the goroutine -func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger { - bl.lock.Lock() - defer bl.lock.Unlock() - if bl.asynchronous { - return bl - } - bl.asynchronous = true - if len(msgLen) > 0 && msgLen[0] > 0 { - bl.msgChanLen = msgLen[0] - } - bl.msgChan = make(chan *logMsg, bl.msgChanLen) - logMsgPool = &sync.Pool{ - New: func() interface{} { - return &logMsg{} - }, - } - bl.wg.Add(1) - go bl.startLogger() - return bl -} - -// SetLogger provides a given logger adapter into BeeLogger with config string. -// config need to be correct JSON as string: {"interval":360}. -func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error { - config := append(configs, "{}")[0] - for _, l := range bl.outputs { - if l.name == adapterName { - return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName) - } - } - - log, ok := adapters[adapterName] - if !ok { - return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName) - } - - lg := log() - err := lg.Init(config) - if err != nil { - fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error()) - return err - } - bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg}) - return nil -} - -// SetLogger provides a given logger adapter into BeeLogger with config string. -// config need to be correct JSON as string: {"interval":360}. -func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error { - bl.lock.Lock() - defer bl.lock.Unlock() - if !bl.init { - bl.outputs = []*nameLogger{} - bl.init = true - } - return bl.setLogger(adapterName, configs...) -} - -// DelLogger remove a logger adapter in BeeLogger. 
-func (bl *BeeLogger) DelLogger(adapterName string) error { - bl.lock.Lock() - defer bl.lock.Unlock() - outputs := []*nameLogger{} - for _, lg := range bl.outputs { - if lg.name == adapterName { - lg.Destroy() - } else { - outputs = append(outputs, lg) - } - } - if len(outputs) == len(bl.outputs) { - return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName) - } - bl.outputs = outputs - return nil -} - -func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) { - for _, l := range bl.outputs { - err := l.WriteMsg(when, msg, level) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err) - } - } -} - -func (bl *BeeLogger) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - // writeMsg will always add a '\n' character - if p[len(p)-1] == '\n' { - p = p[0 : len(p)-1] - } - // set levelLoggerImpl to ensure all log message will be write out - err = bl.writeMsg(levelLoggerImpl, string(p)) - if err == nil { - return len(p), err - } - return 0, err -} - -func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error { - if !bl.init { - bl.lock.Lock() - bl.setLogger(AdapterConsole) - bl.lock.Unlock() - } - - if len(v) > 0 { - msg = fmt.Sprintf(msg, v...) - } - when := time.Now() - if bl.enableFuncCallDepth { - _, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth) - if !ok { - file = "???" - line = 0 - } else { - if strings.Contains(file, "") { - _, file, line, ok = runtime.Caller(bl.loggerFuncCallDepth + 1) - if !ok { - file = "???" - line = 0 - } - } - } - _, filename := path.Split(file) - msg = "[" + filename + ":" + strconv.FormatInt(int64(line), 10) + "] " + msg - } - - //set level info in front of filename info - if logLevel == levelLoggerImpl { - // set to emergency to ensure all log will be print out correctly - logLevel = LevelEmergency - } else { - msg = levelPrefix[logLevel] + msg - } - - if bl.asynchronous { - lm := logMsgPool.Get().(*logMsg) - lm.level = logLevel - lm.msg = msg - lm.when = when - bl.msgChan <- lm - } else { - bl.writeToLoggers(when, msg, logLevel) - } - return nil -} - -// SetLevel Set log message level. -// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning), -// log providers will not even be sent the message. -func (bl *BeeLogger) SetLevel(l int) { - bl.level = l -} - -// SetLogFuncCallDepth set log funcCallDepth -func (bl *BeeLogger) SetLogFuncCallDepth(d int) { - bl.loggerFuncCallDepth = d -} - -// GetLogFuncCallDepth return log funcCallDepth for wrapper -func (bl *BeeLogger) GetLogFuncCallDepth() int { - return bl.loggerFuncCallDepth -} - -// EnableFuncCallDepth enable log funcCallDepth -func (bl *BeeLogger) EnableFuncCallDepth(b bool) { - bl.enableFuncCallDepth = b -} - -// start logger chan reading. -// when chan is not empty, write logs. -func (bl *BeeLogger) startLogger() { - gameOver := false - for { - select { - case bm := <-bl.msgChan: - bl.writeToLoggers(bm.when, bm.msg, bm.level) - logMsgPool.Put(bm) - case sg := <-bl.signalChan: - // Now should only send "flush" or "close" to bl.signalChan - bl.flush() - if sg == "close" { - for _, l := range bl.outputs { - l.Destroy() - } - bl.outputs = nil - gameOver = true - } - bl.wg.Done() - } - if gameOver { - break - } - } -} - -// Emergency Log EMERGENCY level message. -func (bl *BeeLogger) Emergency(format string, v ...interface{}) { - if LevelEmergency > bl.level { - return - } - bl.writeMsg(LevelEmergency, format, v...) 
-} - -// Alert Log ALERT level message. -func (bl *BeeLogger) Alert(format string, v ...interface{}) { - if LevelAlert > bl.level { - return - } - bl.writeMsg(LevelAlert, format, v...) -} - -// Critical Log CRITICAL level message. -func (bl *BeeLogger) Critical(format string, v ...interface{}) { - if LevelCritical > bl.level { - return - } - bl.writeMsg(LevelCritical, format, v...) -} - -// Error Log ERROR level message. -func (bl *BeeLogger) Error(format string, v ...interface{}) { - if LevelError > bl.level { - return - } - bl.writeMsg(LevelError, format, v...) -} - -// Warning Log WARNING level message. -func (bl *BeeLogger) Warning(format string, v ...interface{}) { - if LevelWarn > bl.level { - return - } - bl.writeMsg(LevelWarn, format, v...) -} - -// Notice Log NOTICE level message. -func (bl *BeeLogger) Notice(format string, v ...interface{}) { - if LevelNotice > bl.level { - return - } - bl.writeMsg(LevelNotice, format, v...) -} - -// Informational Log INFORMATIONAL level message. -func (bl *BeeLogger) Informational(format string, v ...interface{}) { - if LevelInfo > bl.level { - return - } - bl.writeMsg(LevelInfo, format, v...) -} - -// Debug Log DEBUG level message. -func (bl *BeeLogger) Debug(format string, v ...interface{}) { - if LevelDebug > bl.level { - return - } - bl.writeMsg(LevelDebug, format, v...) -} - -// Warn Log WARN level message. -// compatibility alias for Warning() -func (bl *BeeLogger) Warn(format string, v ...interface{}) { - if LevelWarn > bl.level { - return - } - bl.writeMsg(LevelWarn, format, v...) -} - -// Info Log INFO level message. -// compatibility alias for Informational() -func (bl *BeeLogger) Info(format string, v ...interface{}) { - if LevelInfo > bl.level { - return - } - bl.writeMsg(LevelInfo, format, v...) -} - -// Trace Log TRACE level message. -// compatibility alias for Debug() -func (bl *BeeLogger) Trace(format string, v ...interface{}) { - if LevelTrace > bl.level { - return - } - bl.writeMsg(LevelTrace, format, v...) -} - -// Flush flush all chan data. -func (bl *BeeLogger) Flush() { - if bl.asynchronous { - bl.signalChan <- "flush" - bl.wg.Wait() - bl.wg.Add(1) - return - } - bl.flush() -} - -// Close close logger, flush all chan data and destroy all adapters in BeeLogger. -func (bl *BeeLogger) Close() { - if bl.asynchronous { - bl.signalChan <- "close" - bl.wg.Wait() - close(bl.msgChan) - } else { - bl.flush() - for _, l := range bl.outputs { - l.Destroy() - } - bl.outputs = nil - } - close(bl.signalChan) -} - -// Reset close all outputs, and set bl.outputs to nil -func (bl *BeeLogger) Reset() { - bl.Flush() - for _, l := range bl.outputs { - l.Destroy() - } - bl.outputs = nil -} - -func (bl *BeeLogger) flush() { - if bl.asynchronous { - for { - if len(bl.msgChan) > 0 { - bm := <-bl.msgChan - bl.writeToLoggers(bm.when, bm.msg, bm.level) - logMsgPool.Put(bm) - continue - } - break - } - } - for _, l := range bl.outputs { - l.Flush() - } -} - -// beeLogger references the used application logger. 
-var beeLogger *BeeLogger = NewLogger() - -// GetLogger returns the default BeeLogger -func GetBeeLogger() *BeeLogger { - return beeLogger -} - -var beeLoggerMap = struct { - sync.RWMutex - logs map[string]*log.Logger -}{ - logs: map[string]*log.Logger{}, -} - -// GetLogger returns the default BeeLogger -func GetLogger(prefixes ...string) *log.Logger { - prefix := append(prefixes, "")[0] - if prefix != "" { - prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix)) - } - beeLoggerMap.RLock() - l, ok := beeLoggerMap.logs[prefix] - if ok { - beeLoggerMap.RUnlock() - return l - } - beeLoggerMap.RUnlock() - beeLoggerMap.Lock() - defer beeLoggerMap.Unlock() - l, ok = beeLoggerMap.logs[prefix] - if !ok { - l = log.New(beeLogger, prefix, 0) - beeLoggerMap.logs[prefix] = l - } - return l -} - -// Reset will remove all the adapter -func Reset() { - beeLogger.Reset() -} - -func Async(msgLen ...int64) *BeeLogger { - return beeLogger.Async(msgLen...) -} - -// SetLevel sets the global log level used by the simple logger. -func SetLevel(l int) { - beeLogger.SetLevel(l) -} - -// EnableFuncCallDepth enable log funcCallDepth -func EnableFuncCallDepth(b bool) { - beeLogger.enableFuncCallDepth = b -} - -// SetLogFuncCall set the CallDepth, default is 4 -func SetLogFuncCall(b bool) { - beeLogger.EnableFuncCallDepth(b) - beeLogger.SetLogFuncCallDepth(4) -} - -// SetLogFuncCallDepth set log funcCallDepth -func SetLogFuncCallDepth(d int) { - beeLogger.loggerFuncCallDepth = d -} - -// SetLogger sets a new logger. -func SetLogger(adapter string, config ...string) error { - err := beeLogger.SetLogger(adapter, config...) - if err != nil { - return err - } - return nil -} - -// Emergency logs a message at emergency level. -func Emergency(f interface{}, v ...interface{}) { - beeLogger.Emergency(formatLog(f, v...)) -} - -// Alert logs a message at alert level. -func Alert(f interface{}, v ...interface{}) { - beeLogger.Alert(formatLog(f, v...)) -} - -// Critical logs a message at critical level. -func Critical(f interface{}, v ...interface{}) { - beeLogger.Critical(formatLog(f, v...)) -} - -// Error logs a message at error level. -func Error(f interface{}, v ...interface{}) { - beeLogger.Error(formatLog(f, v...)) -} - -// Warning logs a message at warning level. -func Warning(f interface{}, v ...interface{}) { - beeLogger.Warn(formatLog(f, v...)) -} - -// Warn compatibility alias for Warning() -func Warn(f interface{}, v ...interface{}) { - beeLogger.Warn(formatLog(f, v...)) -} - -// Notice logs a message at notice level. -func Notice(f interface{}, v ...interface{}) { - beeLogger.Notice(formatLog(f, v...)) -} - -// Informational logs a message at info level. -func Informational(f interface{}, v ...interface{}) { - beeLogger.Info(formatLog(f, v...)) -} - -// Info compatibility alias for Warning() -func Info(f interface{}, v ...interface{}) { - beeLogger.Info(formatLog(f, v...)) -} - -// Debug logs a message at debug level. -func Debug(f interface{}, v ...interface{}) { - beeLogger.Debug(formatLog(f, v...)) -} - -// Trace logs a message at trace level. 
-// compatibility alias for Warning() -func Trace(f interface{}, v ...interface{}) { - beeLogger.Trace(formatLog(f, v...)) -} - -func formatLog(f interface{}, v ...interface{}) string { - var msg string - switch f.(type) { - case string: - msg = f.(string) - if len(v) == 0 { - return msg - } - if strings.Contains(msg, "%") && !strings.Contains(msg, "%%") { - //format string - } else { - //do not contain format char - msg += strings.Repeat(" %v", len(v)) - } - default: - msg = fmt.Sprint(f) - if len(v) == 0 { - return msg - } - msg += strings.Repeat(" %v", len(v)) - } - return fmt.Sprintf(msg, v...) -} diff --git a/vendor/github.com/fatedier/beego/logs/logger.go b/vendor/github.com/fatedier/beego/logs/logger.go deleted file mode 100644 index e0abfdc..0000000 --- a/vendor/github.com/fatedier/beego/logs/logger.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "fmt" - "io" - "os" - "sync" - "time" -) - -type logWriter struct { - sync.Mutex - writer io.Writer -} - -func newLogWriter(wr io.Writer) *logWriter { - return &logWriter{writer: wr} -} - -func (lg *logWriter) println(when time.Time, msg string) { - lg.Lock() - h, _ := formatTimeHeader(when) - lg.writer.Write(append(append(h, msg...), '\n')) - lg.Unlock() -} - -type outputMode int - -// DiscardNonColorEscSeq supports the divided color escape sequence. -// But non-color escape sequence is not output. -// Please use the OutputNonColorEscSeq If you want to output a non-color -// escape sequences such as ncurses. However, it does not support the divided -// color escape sequence. -const ( - _ outputMode = iota - DiscardNonColorEscSeq - OutputNonColorEscSeq -) - -// NewAnsiColorWriter creates and initializes a new ansiColorWriter -// using io.Writer w as its initial contents. -// In the console of Windows, which change the foreground and background -// colors of the text by the escape sequence. -// In the console of other systems, which writes to w all text. -func NewAnsiColorWriter(w io.Writer) io.Writer { - return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq) -} - -// NewModeAnsiColorWriter create and initializes a new ansiColorWriter -// by specifying the outputMode. 
-func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer { - if _, ok := w.(*ansiColorWriter); !ok { - return &ansiColorWriter{ - w: w, - mode: mode, - } - } - return w -} - -const ( - y1 = `0123456789` - y2 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789` - y3 = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999` - y4 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789` - mo1 = `000000000111` - mo2 = `123456789012` - d1 = `0000000001111111111222222222233` - d2 = `1234567890123456789012345678901` - h1 = `000000000011111111112222` - h2 = `012345678901234567890123` - mi1 = `000000000011111111112222222222333333333344444444445555555555` - mi2 = `012345678901234567890123456789012345678901234567890123456789` - s1 = `000000000011111111112222222222333333333344444444445555555555` - s2 = `012345678901234567890123456789012345678901234567890123456789` -) - -func formatTimeHeader(when time.Time) ([]byte, int) { - y, mo, d := when.Date() - h, mi, s := when.Clock() - //len("2006/01/02 15:04:05 ")==20 - var buf [20]byte - - buf[0] = y1[y/1000%10] - buf[1] = y2[y/100] - buf[2] = y3[y-y/100*100] - buf[3] = y4[y-y/100*100] - buf[4] = '/' - buf[5] = mo1[mo-1] - buf[6] = mo2[mo-1] - buf[7] = '/' - buf[8] = d1[d-1] - buf[9] = d2[d-1] - buf[10] = ' ' - buf[11] = h1[h] - buf[12] = h2[h] - buf[13] = ':' - buf[14] = mi1[mi] - buf[15] = mi2[mi] - buf[16] = ':' - buf[17] = s1[s] - buf[18] = s2[s] - buf[19] = ' ' - - return buf[0:], d -} - -var ( - green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) - white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) - yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109}) - red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) - blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) - magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) - cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) - - w32Green = string([]byte{27, 91, 52, 50, 109}) - w32White = string([]byte{27, 91, 52, 55, 109}) - w32Yellow = string([]byte{27, 91, 52, 51, 109}) - w32Red = string([]byte{27, 91, 52, 49, 109}) - w32Blue = string([]byte{27, 91, 52, 52, 109}) - w32Magenta = string([]byte{27, 91, 52, 53, 109}) - w32Cyan = string([]byte{27, 91, 52, 54, 109}) - - reset = string([]byte{27, 91, 48, 109}) -) - -func ColorByStatus(cond bool, code int) string { - switch { - case code >= 200 && code < 300: - return map[bool]string{true: green, false: w32Green}[cond] - case code >= 300 && code < 400: - return map[bool]string{true: white, false: w32White}[cond] - case code >= 400 && code < 500: - return map[bool]string{true: yellow, false: w32Yellow}[cond] - default: - return map[bool]string{true: red, false: w32Red}[cond] - } -} - -func ColorByMethod(cond bool, method string) string { - switch method { - case "GET": - return map[bool]string{true: blue, false: w32Blue}[cond] - case "POST": - return map[bool]string{true: cyan, false: w32Cyan}[cond] - case "PUT": - return map[bool]string{true: yellow, false: w32Yellow}[cond] - case "DELETE": - return map[bool]string{true: red, false: w32Red}[cond] - case "PATCH": - return map[bool]string{true: green, false: w32Green}[cond] - case "HEAD": - return map[bool]string{true: magenta, false: w32Magenta}[cond] - case "OPTIONS": - return map[bool]string{true: white, false: w32White}[cond] - default: - return reset - } -} - -// Guard Mutex to guarantee atomicity of W32Debug(string) function -var mu 
sync.Mutex - -// Helper method to output colored logs in Windows terminals -func W32Debug(msg string) { - mu.Lock() - defer mu.Unlock() - - current := time.Now() - w := NewAnsiColorWriter(os.Stdout) - - fmt.Fprintf(w, "[beego] %v %s\n", current.Format("2006/01/02 - 15:04:05"), msg) -} diff --git a/vendor/github.com/fatedier/beego/logs/multifile.go b/vendor/github.com/fatedier/beego/logs/multifile.go deleted file mode 100644 index 63204e1..0000000 --- a/vendor/github.com/fatedier/beego/logs/multifile.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "encoding/json" - "time" -) - -// A filesLogWriter manages several fileLogWriter -// filesLogWriter will write logs to the file in json configuration and write the same level log to correspond file -// means if the file name in configuration is project.log filesLogWriter will create project.error.log/project.debug.log -// and write the error-level logs to project.error.log and write the debug-level logs to project.debug.log -// the rotate attribute also acts like fileLogWriter -type multiFileLogWriter struct { - writers [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter - fullLogWriter *fileLogWriter - Separate []string `json:"separate"` -} - -var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"} - -// Init file logger with json config. -// jsonConfig like: -// { -// "filename":"logs/beego.log", -// "maxLines":0, -// "maxsize":0, -// "daily":true, -// "maxDays":15, -// "rotate":true, -// "perm":0600, -// "separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"], -// } - -func (f *multiFileLogWriter) Init(config string) error { - writer := newFileWriter().(*fileLogWriter) - err := writer.Init(config) - if err != nil { - return err - } - f.fullLogWriter = writer - f.writers[LevelDebug+1] = writer - - //unmarshal "separate" field to f.Separate - json.Unmarshal([]byte(config), f) - - jsonMap := map[string]interface{}{} - json.Unmarshal([]byte(config), &jsonMap) - - for i := LevelEmergency; i < LevelDebug+1; i++ { - for _, v := range f.Separate { - if v == levelNames[i] { - jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." 
+ levelNames[i] + f.fullLogWriter.suffix - jsonMap["level"] = i - bs, _ := json.Marshal(jsonMap) - writer = newFileWriter().(*fileLogWriter) - writer.Init(string(bs)) - f.writers[i] = writer - } - } - } - - return nil -} - -func (f *multiFileLogWriter) Destroy() { - for i := 0; i < len(f.writers); i++ { - if f.writers[i] != nil { - f.writers[i].Destroy() - } - } -} - -func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error { - if f.fullLogWriter != nil { - f.fullLogWriter.WriteMsg(when, msg, level) - } - for i := 0; i < len(f.writers)-1; i++ { - if f.writers[i] != nil { - if level == f.writers[i].Level { - f.writers[i].WriteMsg(when, msg, level) - } - } - } - return nil -} - -func (f *multiFileLogWriter) Flush() { - for i := 0; i < len(f.writers); i++ { - if f.writers[i] != nil { - f.writers[i].Flush() - } - } -} - -// newFilesWriter create a FileLogWriter returning as LoggerInterface. -func newFilesWriter() Logger { - return &multiFileLogWriter{} -} - -func init() { - Register(AdapterMultiFile, newFilesWriter) -} diff --git a/vendor/github.com/fatedier/beego/logs/slack.go b/vendor/github.com/fatedier/beego/logs/slack.go deleted file mode 100644 index 90f009c..0000000 --- a/vendor/github.com/fatedier/beego/logs/slack.go +++ /dev/null @@ -1,66 +0,0 @@ -package logs - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" -) - -// SLACKWriter implements beego LoggerInterface and is used to send jiaoliao webhook -type SLACKWriter struct { - WebhookURL string `json:"webhookurl"` - Level int `json:"level"` -} - -// newSLACKWriter create jiaoliao writer. -func newSLACKWriter() Logger { - return &SLACKWriter{Level: LevelTrace} -} - -// Init SLACKWriter with json config string -func (s *SLACKWriter) Init(jsonconfig string) error { - err := json.Unmarshal([]byte(jsonconfig), s) - if err != nil { - return err - } - return nil -} - -// WriteMsg write message in smtp writer. -// it will send an email with subject and only this message. -func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > s.Level { - return nil - } - - text := fmt.Sprintf("{\"text\": \"%s %s\"}", when.Format("2006-01-02 15:04:05"), msg) - - form := url.Values{} - form.Add("payload", text) - - resp, err := http.PostForm(s.WebhookURL, form) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) - } - return nil -} - -// Flush implementing method. empty. -func (s *SLACKWriter) Flush() { - return -} - -// Destroy implementing method. empty. -func (s *SLACKWriter) Destroy() { - return -} - -func init() { - Register(AdapterSlack, newSLACKWriter) -} diff --git a/vendor/github.com/fatedier/beego/logs/smtp.go b/vendor/github.com/fatedier/beego/logs/smtp.go deleted file mode 100644 index 834130e..0000000 --- a/vendor/github.com/fatedier/beego/logs/smtp.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package logs - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "net" - "net/smtp" - "strings" - "time" -) - -// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server. -type SMTPWriter struct { - Username string `json:"username"` - Password string `json:"password"` - Host string `json:"host"` - Subject string `json:"subject"` - FromAddress string `json:"fromAddress"` - RecipientAddresses []string `json:"sendTos"` - Level int `json:"level"` -} - -// NewSMTPWriter create smtp writer. -func newSMTPWriter() Logger { - return &SMTPWriter{Level: LevelTrace} -} - -// Init smtp writer with json config. -// config like: -// { -// "username":"example@gmail.com", -// "password:"password", -// "host":"smtp.gmail.com:465", -// "subject":"email title", -// "fromAddress":"from@example.com", -// "sendTos":["email1","email2"], -// "level":LevelError -// } -func (s *SMTPWriter) Init(jsonconfig string) error { - err := json.Unmarshal([]byte(jsonconfig), s) - if err != nil { - return err - } - return nil -} - -func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth { - if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 { - return nil - } - return smtp.PlainAuth( - "", - s.Username, - s.Password, - host, - ) -} - -func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error { - client, err := smtp.Dial(hostAddressWithPort) - if err != nil { - return err - } - - host, _, _ := net.SplitHostPort(hostAddressWithPort) - tlsConn := &tls.Config{ - InsecureSkipVerify: true, - ServerName: host, - } - if err = client.StartTLS(tlsConn); err != nil { - return err - } - - if auth != nil { - if err = client.Auth(auth); err != nil { - return err - } - } - - if err = client.Mail(fromAddress); err != nil { - return err - } - - for _, rec := range recipients { - if err = client.Rcpt(rec); err != nil { - return err - } - } - - w, err := client.Data() - if err != nil { - return err - } - _, err = w.Write([]byte(msgContent)) - if err != nil { - return err - } - - err = w.Close() - if err != nil { - return err - } - - err = client.Quit() - if err != nil { - return err - } - - return nil -} - -// WriteMsg write message in smtp writer. -// it will send an email with subject and only this message. -func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error { - if level > s.Level { - return nil - } - - hp := strings.Split(s.Host, ":") - - // Set up authentication information. - auth := s.getSMTPAuth(hp[0]) - - // Connect to the server, authenticate, set the sender and recipient, - // and send the email all in one step. - contentType := "Content-Type: text/plain" + "; charset=UTF-8" - mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress + - ">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", when.Format("2006-01-02 15:04:05")) + msg) - - return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg) -} - -// Flush implementing method. empty. -func (s *SMTPWriter) Flush() { - return -} - -// Destroy implementing method. empty. 
-func (s *SMTPWriter) Destroy() { - return -} - -func init() { - Register(AdapterMail, newSMTPWriter) -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 1b4ec93..0000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa195..0000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae31..0000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index 5074bba..0000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,7 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 0ac8e4f..0000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedCopy4Tag = errors.New("snappy: unsupported COPY_4 tag") - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errUnsupportedCopy4Tag - } - - if offset > d || length > len(dst)-d { - return nil, ErrCorrupt - } - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxUncompressedChunkLen), - buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. 
-func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4]) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if !r.readFull(r.decoded[:n]) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)]) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen]) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 297e628..0000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d] - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for uint(s+3) < uint(len(src)) { // The uint conversions catch overflow from the +3. - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - // Skip multiple bytes if the last match was >= 32 bytes prior. - s += 1 + (s-lit)>>5 - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. 
- d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. - if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d] -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxUncompressedChunkLen), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer than can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. 
This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxUncompressedChunkLen { - uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ef1e33e..0000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. 
-// -// The C++ snappy implementation is at https://github.com/google/snappy -package snappy - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 bytes". - maxUncompressedChunkLen = 65536 - - // maxEncodedLenOfMaxUncompressedChunkLen equals - // MaxEncodedLen(maxUncompressedChunkLen), but is hard coded to be a const - // instead of a variable, so that obufLen can also be a const. Their - // equivalence is confirmed by TestMaxEncodedLenOfMaxUncompressedChunkLen. - maxEncodedLenOfMaxUncompressedChunkLen = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxUncompressedChunkLen -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/julienschmidt/httprouter/.travis.yml b/vendor/github.com/julienschmidt/httprouter/.travis.yml deleted file mode 100644 index 24cae4c..0000000 --- a/vendor/github.com/julienschmidt/httprouter/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -sudo: false -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/vendor/github.com/julienschmidt/httprouter/LICENSE b/vendor/github.com/julienschmidt/httprouter/LICENSE deleted file mode 100644 index b829abc..0000000 --- a/vendor/github.com/julienschmidt/httprouter/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Julien Schmidt. All rights reserved. 
- - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * The names of the contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/julienschmidt/httprouter/README.md b/vendor/github.com/julienschmidt/httprouter/README.md deleted file mode 100644 index 7204fd6..0000000 --- a/vendor/github.com/julienschmidt/httprouter/README.md +++ /dev/null @@ -1,276 +0,0 @@ -# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.svg?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![GoDoc](https://godoc.org/github.com/julienschmidt/httprouter?status.svg)](http://godoc.org/github.com/julienschmidt/httprouter) - -HttpRouter is a lightweight high performance HTTP request router (also called *multiplexer* or just *mux* for short) for [Go](https://golang.org/). - -In contrast to the [default mux][http.ServeMux] of Go's `net/http` package, this router supports variables in the routing pattern and matches against the request method. It also scales better. - -The router is optimized for high performance and a small memory footprint. It scales well even with very long paths and a large number of routes. A compressing dynamic trie (radix tree) structure is used for efficient matching. - -## Features - -**Only explicit matches:** With other routers, like [`http.ServeMux`][http.ServeMux], a requested URL path could match multiple patterns. Therefore they have some awkward pattern priority rules, like *longest match* or *first registered, first matched*. By design of this router, a request can only match exactly one or no route. As a result, there are also no unintended matches, which makes it great for SEO and improves the user experience. - -**Stop caring about trailing slashes:** Choose the URL style you like, the router automatically redirects the client if a trailing slash is missing or if there is one extra. Of course it only does so, if the new path has a handler. If you don't like it, you can [turn off this behavior](https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash). 
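The trailing-slash and path-correction behaviour described above is driven by exported fields on `httprouter.Router` (visible in the deleted `router.go` further down in this diff). A minimal sketch of opting out of the automatic redirects, assuming that vendored API:

```go
package main

import (
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func Ping(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.Write([]byte("pong\n"))
}

func main() {
	router := httprouter.New()

	// New() enables both redirects by default (see router.go below);
	// disable them if strict matching with no automatic redirects is wanted.
	router.RedirectTrailingSlash = false
	router.RedirectFixedPath = false

	router.GET("/ping", Ping)

	log.Fatal(http.ListenAndServe(":8080", router))
}
```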
- -**Path auto-correction:** Besides detecting the missing or additional trailing slash at no extra cost, the router can also fix wrong cases and remove superfluous path elements (like `../` or `//`). Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? HttpRouter can help him by making a case-insensitive look-up and redirecting him to the correct URL. - -**Parameters in your routing pattern:** Stop parsing the requested URL path, just give the path segment a name and the router delivers the dynamic value to you. Because of the design of the router, path parameters are very cheap. - -**Zero Garbage:** The matching and dispatching process generates zero bytes of garbage. In fact, the only heap allocations that are made, is by building the slice of the key-value pairs for path parameters. If the request path contains no parameters, not a single heap allocation is necessary. - -**Best Performance:** [Benchmarks speak for themselves][benchmark]. See below for technical details of the implementation. - -**No more server crashes:** You can set a [Panic handler][Router.PanicHandler] to deal with panics occurring during handling a HTTP request. The router then recovers and lets the `PanicHandler` log what happened and deliver a nice error page. - -**Perfect for APIs:** The router design encourages to build sensible, hierarchical RESTful APIs. Moreover it has builtin native support for [OPTIONS requests](http://zacstewart.com/2012/04/14/http-options-method.html) and `405 Method Not Allowed` replies. - -Of course you can also set **custom [`NotFound`][Router.NotFound] and [`MethodNotAllowed`](https://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**][Router.ServeFiles]. - -## Usage - -This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details. - -Let's start with a trivial example: - -```go -package main - -import ( - "fmt" - "github.com/julienschmidt/httprouter" - "net/http" - "log" -) - -func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Welcome!\n") -} - -func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) -} - -func main() { - router := httprouter.New() - router.GET("/", Index) - router.GET("/hello/:name", Hello) - - log.Fatal(http.ListenAndServe(":8080", router)) -} -``` - -### Named parameters - -As you can see, `:name` is a *named parameter*. The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: `:name` can be retrived by `ByName("name")`. - -Named parameters only match a single path segment: - -``` -Pattern: /user/:user - - /user/gordon match - /user/you match - /user/gordon/profile no match - /user/ no match -``` - -**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other. - -### Catch-All parameters - -The second type are *catch-all* parameters and have the form `*name`. Like the name suggests, they match everything. 
Therefore they must always be at the **end** of the pattern: - -``` -Pattern: /src/*filepath - - /src/ match - /src/somefile.go match - /src/subdir/somefile.go match -``` - -## How does it work? - -The router relies on a tree structure which makes heavy use of *common prefixes*, it is basically a *compact* [*prefix tree*](https://en.wikipedia.org/wiki/Trie) (or just [*Radix tree*](https://en.wikipedia.org/wiki/Radix_tree)). Nodes with a common prefix also share a common parent. Here is a short example what the routing tree for the `GET` request method could look like: - -``` -Priority Path Handle -9 \ *<1> -3 ├s nil -2 |├earch\ *<2> -1 |└upport\ *<3> -2 ├blog\ *<4> -1 | └:post nil -1 | └\ *<5> -2 ├about-us\ *<6> -1 | └team\ *<7> -1 └contact\ *<8> -``` - -Every `*` represents the memory address of a handler function (a pointer). If you follow a path trough the tree from the root to the leaf, you get the complete route path, e.g `\blog\:post\`, where `:post` is just a placeholder ([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a tree structure also allows us to use dynamic parts like the `:post` parameter, since we actually match against the routing patterns instead of just comparing hashes. [As benchmarks show][benchmark], this works very well and efficient. - -Since URL paths have a hierarchical structure and make use only of a limited set of characters (byte values), it is very likely that there are a lot of common prefixes. This allows us to easily reduce the routing into ever smaller problems. Moreover the router manages a separate tree for every request method. For one thing it is more space efficient than holding a method->handle map in every single node, for another thing is also allows us to greatly reduce the routing problem before even starting the look-up in the prefix-tree. - -For even better scalability, the child nodes on each tree level are ordered by priority, where the priority is just the number of handles registered in sub nodes (children, grandchildren, and so on..). This helps in two ways: - -1. Nodes which are part of the most routing paths are evaluated first. This helps to make as much routes as possible to be reachable as fast as possible. -2. It is some sort of cost compensation. The longest reachable path (highest cost) can always be evaluated first. The following scheme visualizes the tree structure. Nodes are evaluated from top to bottom and from left to right. - -``` -├------------ -├--------- -├----- -├---- -├-- -├-- -└- -``` - -## Why doesn't this work with `http.Handler`? - -**It does!** The router itself implements the `http.Handler` interface. Moreover the router provides convenient [adapters for `http.Handler`][Router.Handler]s and [`http.HandlerFunc`][Router.HandlerFunc]s which allows them to be used as a [`httprouter.Handle`][Router.Handle] when registering a route. The only disadvantage is, that no parameter values can be retrieved when a `http.Handler` or `http.HandlerFunc` is used, since there is no efficient way to pass the values with the existing function parameters. Therefore [`httprouter.Handle`][Router.Handle] has a third function parameter. - -Just try it out for yourself, the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up. - -## Where can I find Middleware *X*? - -This package just provides a very efficient request router with a few extra features. 
The router is just a [`http.Handler`][http.Handler], you can chain any http.Handler compatible middleware before the router, for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). Or you could [just write your own](https://justinas.org/writing-http-middleware-in-go/), it's very easy! - -Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter). - -### Multi-domain / Sub-domains - -Here is a quick example: Does your server serve multiple domains / hosts? -You want to use sub-domains? -Define a router per host! - -```go -// We need an object that implements the http.Handler interface. -// Therefore we need a type for which we implement the ServeHTTP method. -// We just use a map here, in which we map host names (with port) to http.Handlers -type HostSwitch map[string]http.Handler - -// Implement the ServerHTTP method on our new type -func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Check if a http.Handler is registered for the given host. - // If yes, use it to handle the request. - if handler := hs[r.Host]; handler != nil { - handler.ServeHTTP(w, r) - } else { - // Handle host names for wich no handler is registered - http.Error(w, "Forbidden", 403) // Or Redirect? - } -} - -func main() { - // Initialize a router as usual - router := httprouter.New() - router.GET("/", Index) - router.GET("/hello/:name", Hello) - - // Make a new HostSwitch and insert the router (our http handler) - // for example.com and port 12345 - hs := make(HostSwitch) - hs["example.com:12345"] = router - - // Use the HostSwitch to listen and serve on port 12345 - log.Fatal(http.ListenAndServe(":12345", hs)) -} -``` - -### Basic Authentication - -Another quick example: Basic Authentication (RFC 2617) for handles: - -```go -package main - -import ( - "fmt" - "log" - "net/http" - - "github.com/julienschmidt/httprouter" -) - -func BasicAuth(h httprouter.Handle, requiredUser, requiredPassword string) httprouter.Handle { - return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - // Get the Basic Authentication credentials - user, password, hasAuth := r.BasicAuth() - - if hasAuth && user == requiredUser && password == requiredPassword { - // Delegate request to the given handle - h(w, r, ps) - } else { - // Request Basic Authentication otherwise - w.Header().Set("WWW-Authenticate", "Basic realm=Restricted") - http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) - } - } -} - -func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Not protected!\n") -} - -func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - fmt.Fprint(w, "Protected!\n") -} - -func main() { - user := "gordon" - pass := "secret!" - - router := httprouter.New() - router.GET("/", Index) - router.GET("/protected/", BasicAuth(Protected, user, pass)) - - log.Fatal(http.ListenAndServe(":8080", router)) -} -``` - -## Chaining with the NotFound handler - -**NOTE: It might be required to set [`Router.HandleMethodNotAllowed`][Router.HandleMethodNotAllowed] to `false` to avoid problems.** - -You can use another [`http.Handler`][http.Handler], for example another router, to handle requests which could not be matched by this router by using the [`Router.NotFound`][Router.NotFound] handler. This allows chaining. 
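The chaining described above is not illustrated in the vendored README itself; a minimal sketch, assuming the same `httprouter` API, might look like the following (the fallback `legacy` mux is hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

func main() {
	// Primary router owns the API routes.
	api := httprouter.New()
	api.GET("/api/ping", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
		fmt.Fprint(w, "pong\n")
	})

	// Anything the primary router cannot match falls through to a
	// plain http.ServeMux (any http.Handler works here).
	legacy := http.NewServeMux()
	legacy.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "handled by the fallback mux\n")
	})

	// Per the note above, HandleMethodNotAllowed may need to be disabled
	// so unmatched requests are delegated to NotFound instead of answered
	// with 405 by the primary router.
	api.HandleMethodNotAllowed = false
	api.NotFound = legacy

	log.Fatal(http.ListenAndServe(":8080", api))
}
```

Because `NotFound` is just an `http.Handler`, the fallback can itself be another `httprouter.Router`, which is what makes the chaining composable.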
- -### Static files - -The `NotFound` handler can for example be used to serve static files from the root path `/` (like an `index.html` file along with other assets): - -```go -// Serve static files from the ./public directory -router.NotFound = http.FileServer(http.Dir("public")) -``` - -But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`. - -## Web Frameworks based on HttpRouter - -If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package: - -* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework -* [api2go](https://github.com/manyminds/api2go): A JSON API Implementation for Go -* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance -* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go -* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine -* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow -* [httpway](https://github.com/corneldamian/httpway): Simple middleware extension with context for httprouter and a server with gracefully shutdown support -* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context -* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba -* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang -* [River](https://github.com/abiosoft/river): River is a simple and lightweight REST server -* [Roxanna](https://github.com/iamthemuffinman/Roxanna): An amalgamation of httprouter, better logging, and hot reload -* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts -* [xmux](https://github.com/rs/xmux): xmux is a httprouter fork on top of xhandler (net/context aware) - -[benchmark]: -[http.Handler]: -[Router.Handle]: -[Router.HandleMethodNotAllowed]: -[Router.Handler]: -[Router.HandlerFunc]: -[Router.NotFound]: -[Router.PanicHandler]: -[Router.ServeFiles]: diff --git a/vendor/github.com/julienschmidt/httprouter/path.go b/vendor/github.com/julienschmidt/httprouter/path.go deleted file mode 100644 index 486134d..0000000 --- a/vendor/github.com/julienschmidt/httprouter/path.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Based on the path package, Copyright 2009 The Go Authors. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -// CleanPath is the URL version of path.Clean, it returns a canonical URL path -// for p, eliminating . and .. elements. -// -// The following rules are applied iteratively until no further processing can -// be done: -// 1. Replace multiple slashes with a single slash. -// 2. Eliminate each . path name element (the current directory). -// 3. Eliminate each inner .. path name element (the parent directory) -// along with the non-.. element that precedes it. -// 4. Eliminate .. elements that begin a rooted path: -// that is, replace "/.." by "/" at the beginning of a path. 
-// -// If the result of this process is an empty string, "/" is returned -func CleanPath(p string) string { - // Turn empty string into "/" - if p == "" { - return "/" - } - - n := len(p) - var buf []byte - - // Invariants: - // reading from path; r is index of next byte to process. - // writing to buf; w is index of next byte to write. - - // path must start with '/' - r := 1 - w := 1 - - if p[0] != '/' { - r = 0 - buf = make([]byte, n+1) - buf[0] = '/' - } - - trailing := n > 2 && p[n-1] == '/' - - // A bit more clunky without a 'lazybuf' like the path package, but the loop - // gets completely inlined (bufApp). So in contrast to the path package this - // loop has no expensive function calls (except 1x make) - - for r < n { - switch { - case p[r] == '/': - // empty path element, trailing slash is added after the end - r++ - - case p[r] == '.' && r+1 == n: - trailing = true - r++ - - case p[r] == '.' && p[r+1] == '/': - // . element - r++ - - case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): - // .. element: remove to last / - r += 2 - - if w > 1 { - // can backtrack - w-- - - if buf == nil { - for w > 1 && p[w] != '/' { - w-- - } - } else { - for w > 1 && buf[w] != '/' { - w-- - } - } - } - - default: - // real path element. - // add slash if needed - if w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - // copy element - for r < n && p[r] != '/' { - bufApp(&buf, p, w, p[r]) - w++ - r++ - } - } - } - - // re-append trailing slash - if trailing && w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - if buf == nil { - return p[:w] - } - return string(buf[:w]) -} - -// internal helper to lazily create a buffer if necessary -func bufApp(buf *[]byte, s string, w int, c byte) { - if *buf == nil { - if s[w] == c { - return - } - - *buf = make([]byte, len(s)) - copy(*buf, s[:w]) - } - (*buf)[w] = c -} diff --git a/vendor/github.com/julienschmidt/httprouter/router.go b/vendor/github.com/julienschmidt/httprouter/router.go deleted file mode 100644 index bb17330..0000000 --- a/vendor/github.com/julienschmidt/httprouter/router.go +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// Package httprouter is a trie based high performance HTTP request router. -// -// A trivial example is: -// -// package main -// -// import ( -// "fmt" -// "github.com/julienschmidt/httprouter" -// "net/http" -// "log" -// ) -// -// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { -// fmt.Fprint(w, "Welcome!\n") -// } -// -// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { -// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) -// } -// -// func main() { -// router := httprouter.New() -// router.GET("/", Index) -// router.GET("/hello/:name", Hello) -// -// log.Fatal(http.ListenAndServe(":8080", router)) -// } -// -// The router matches incoming requests by the request method and the path. -// If a handle is registered for this path and method, the router delegates the -// request to that function. -// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to -// register handles, for all other methods router.Handle can be used. -// -// The registered path, against which the router matches incoming requests, can -// contain two types of parameters: -// Syntax Type -// :name named parameter -// *name catch-all parameter -// -// Named parameters are dynamic path segments. 
They match anything until the -// next '/' or the path end: -// Path: /blog/:category/:post -// -// Requests: -// /blog/go/request-routers match: category="go", post="request-routers" -// /blog/go/request-routers/ no match, but the router would redirect -// /blog/go/ no match -// /blog/go/request-routers/comments no match -// -// Catch-all parameters match anything until the path end, including the -// directory index (the '/' before the catch-all). Since they match anything -// until the end, catch-all parameters must always be the final path element. -// Path: /files/*filepath -// -// Requests: -// /files/ match: filepath="/" -// /files/LICENSE match: filepath="/LICENSE" -// /files/templates/article.html match: filepath="/templates/article.html" -// /files no match, but the router would redirect -// -// The value of parameters is saved as a slice of the Param struct, consisting -// each of a key and a value. The slice is passed to the Handle func as a third -// parameter. -// There are two ways to retrieve the value of a parameter: -// // by the name of the parameter -// user := ps.ByName("user") // defined by :user or *user -// -// // by the index of the parameter. This way you can also get the name (key) -// thirdKey := ps[2].Key // the name of the 3rd parameter -// thirdValue := ps[2].Value // the value of the 3rd parameter -package httprouter - -import ( - "net/http" -) - -// Handle is a function that can be registered to a route to handle HTTP -// requests. Like http.HandlerFunc, but has a third parameter for the values of -// wildcards (variables). -type Handle func(http.ResponseWriter, *http.Request, Params) - -// Param is a single URL parameter, consisting of a key and a value. -type Param struct { - Key string - Value string -} - -// Params is a Param-slice, as returned by the router. -// The slice is ordered, the first URL parameter is also the first slice value. -// It is therefore safe to read values by the index. -type Params []Param - -// ByName returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) ByName(name string) string { - for i := range ps { - if ps[i].Key == name { - return ps[i].Value - } - } - return "" -} - -// Router is a http.Handler which can be used to dispatch requests to different -// handler functions via configurable routes -type Router struct { - trees map[string]*node - - // Enables automatic redirection if the current route can't be matched but a - // handler for the path with (without) the trailing slash exists. - // For example if /foo/ is requested but a route only exists for /foo, the - // client is redirected to /foo with http status code 301 for GET requests - // and 307 for all other request methods. - RedirectTrailingSlash bool - - // If enabled, the router tries to fix the current request path, if no - // handle is registered for it. - // First superfluous path elements like ../ or // are removed. - // Afterwards the router does a case-insensitive lookup of the cleaned path. - // If a handle can be found for this route, the router makes a redirection - // to the corrected path with status code 301 for GET requests and 307 for - // all other request methods. - // For example /FOO and /..//Foo could be redirected to /foo. - // RedirectTrailingSlash is independent of this option. - RedirectFixedPath bool - - // If enabled, the router checks if another method is allowed for the - // current route, if the current request can not be routed. 
- // If this is the case, the request is answered with 'Method Not Allowed' - // and HTTP status code 405. - // If no other Method is allowed, the request is delegated to the NotFound - // handler. - HandleMethodNotAllowed bool - - // If enabled, the router automatically replies to OPTIONS requests. - // Custom OPTIONS handlers take priority over automatic replies. - HandleOPTIONS bool - - // Configurable http.Handler which is called when no matching route is - // found. If it is not set, http.NotFound is used. - NotFound http.Handler - - // Configurable http.Handler which is called when a request - // cannot be routed and HandleMethodNotAllowed is true. - // If it is not set, http.Error with http.StatusMethodNotAllowed is used. - // The "Allow" header with allowed request methods is set before the handler - // is called. - MethodNotAllowed http.Handler - - // Function to handle panics recovered from http handlers. - // It should be used to generate a error page and return the http error code - // 500 (Internal Server Error). - // The handler can be used to keep your server from crashing because of - // unrecovered panics. - PanicHandler func(http.ResponseWriter, *http.Request, interface{}) -} - -// Make sure the Router conforms with the http.Handler interface -var _ http.Handler = New() - -// New returns a new initialized Router. -// Path auto-correction, including trailing slashes, is enabled by default. -func New() *Router { - return &Router{ - RedirectTrailingSlash: true, - RedirectFixedPath: true, - HandleMethodNotAllowed: true, - HandleOPTIONS: true, - } -} - -// GET is a shortcut for router.Handle("GET", path, handle) -func (r *Router) GET(path string, handle Handle) { - r.Handle("GET", path, handle) -} - -// HEAD is a shortcut for router.Handle("HEAD", path, handle) -func (r *Router) HEAD(path string, handle Handle) { - r.Handle("HEAD", path, handle) -} - -// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) -func (r *Router) OPTIONS(path string, handle Handle) { - r.Handle("OPTIONS", path, handle) -} - -// POST is a shortcut for router.Handle("POST", path, handle) -func (r *Router) POST(path string, handle Handle) { - r.Handle("POST", path, handle) -} - -// PUT is a shortcut for router.Handle("PUT", path, handle) -func (r *Router) PUT(path string, handle Handle) { - r.Handle("PUT", path, handle) -} - -// PATCH is a shortcut for router.Handle("PATCH", path, handle) -func (r *Router) PATCH(path string, handle Handle) { - r.Handle("PATCH", path, handle) -} - -// DELETE is a shortcut for router.Handle("DELETE", path, handle) -func (r *Router) DELETE(path string, handle Handle) { - r.Handle("DELETE", path, handle) -} - -// Handle registers a new request handle with the given path and method. -// -// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut -// functions can be used. -// -// This function is intended for bulk loading and to allow the usage of less -// frequently used, non-standardized or custom methods (e.g. for internal -// communication with a proxy). -func (r *Router) Handle(method, path string, handle Handle) { - if path[0] != '/' { - panic("path must begin with '/' in path '" + path + "'") - } - - if r.trees == nil { - r.trees = make(map[string]*node) - } - - root := r.trees[method] - if root == nil { - root = new(node) - r.trees[method] = root - } - - root.addRoute(path, handle) -} - -// Handler is an adapter which allows the usage of an http.Handler as a -// request handle. 
-func (r *Router) Handler(method, path string, handler http.Handler) { - r.Handle(method, path, - func(w http.ResponseWriter, req *http.Request, _ Params) { - handler.ServeHTTP(w, req) - }, - ) -} - -// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a -// request handle. -func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { - r.Handler(method, path, handler) -} - -// ServeFiles serves files from the given file system root. -// The path must end with "/*filepath", files are then served from the local -// path /defined/root/dir/*filepath. -// For example if root is "/etc" and *filepath is "passwd", the local file -// "/etc/passwd" would be served. -// Internally a http.FileServer is used, therefore http.NotFound is used instead -// of the Router's NotFound handler. -// To use the operating system's file system implementation, -// use http.Dir: -// router.ServeFiles("/src/*filepath", http.Dir("/var/www")) -func (r *Router) ServeFiles(path string, root http.FileSystem) { - if len(path) < 10 || path[len(path)-10:] != "/*filepath" { - panic("path must end with /*filepath in path '" + path + "'") - } - - fileServer := http.FileServer(root) - - r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) { - req.URL.Path = ps.ByName("filepath") - fileServer.ServeHTTP(w, req) - }) -} - -func (r *Router) recv(w http.ResponseWriter, req *http.Request) { - if rcv := recover(); rcv != nil { - r.PanicHandler(w, req, rcv) - } -} - -// Lookup allows the manual lookup of a method + path combo. -// This is e.g. useful to build a framework around this router. -// If the path was found, it returns the handle function and the path parameter -// values. Otherwise the third return value indicates whether a redirection to -// the same path with an extra / without the trailing slash should be performed. -func (r *Router) Lookup(method, path string) (Handle, Params, bool) { - if root := r.trees[method]; root != nil { - return root.getValue(path) - } - return nil, nil, false -} - -func (r *Router) allowed(path, reqMethod string) (allow string) { - if path == "*" { // server-wide - for method := range r.trees { - if method == "OPTIONS" { - continue - } - - // add request method to list of allowed methods - if len(allow) == 0 { - allow = method - } else { - allow += ", " + method - } - } - } else { // specific path - for method := range r.trees { - // Skip the requested method - we already tried this one - if method == reqMethod || method == "OPTIONS" { - continue - } - - handle, _, _ := r.trees[method].getValue(path) - if handle != nil { - // add request method to list of allowed methods - if len(allow) == 0 { - allow = method - } else { - allow += ", " + method - } - } - } - } - if len(allow) > 0 { - allow += ", OPTIONS" - } - return -} - -// ServeHTTP makes the router implement the http.Handler interface. -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if r.PanicHandler != nil { - defer r.recv(w, req) - } - - path := req.URL.Path - - if root := r.trees[req.Method]; root != nil { - if handle, ps, tsr := root.getValue(path); handle != nil { - handle(w, req, ps) - return - } else if req.Method != "CONNECT" && path != "/" { - code := 301 // Permanent redirect, request with GET method - if req.Method != "GET" { - // Temporary redirect, request with same method - // As of Go 1.3, Go does not support status code 308. 
- code = 307 - } - - if tsr && r.RedirectTrailingSlash { - if len(path) > 1 && path[len(path)-1] == '/' { - req.URL.Path = path[:len(path)-1] - } else { - req.URL.Path = path + "/" - } - http.Redirect(w, req, req.URL.String(), code) - return - } - - // Try to fix the request path - if r.RedirectFixedPath { - fixedPath, found := root.findCaseInsensitivePath( - CleanPath(path), - r.RedirectTrailingSlash, - ) - if found { - req.URL.Path = string(fixedPath) - http.Redirect(w, req, req.URL.String(), code) - return - } - } - } - } - - if req.Method == "OPTIONS" { - // Handle OPTIONS requests - if r.HandleOPTIONS { - if allow := r.allowed(path, req.Method); len(allow) > 0 { - w.Header().Set("Allow", allow) - return - } - } - } else { - // Handle 405 - if r.HandleMethodNotAllowed { - if allow := r.allowed(path, req.Method); len(allow) > 0 { - w.Header().Set("Allow", allow) - if r.MethodNotAllowed != nil { - r.MethodNotAllowed.ServeHTTP(w, req) - } else { - http.Error(w, - http.StatusText(http.StatusMethodNotAllowed), - http.StatusMethodNotAllowed, - ) - } - return - } - } - } - - // Handle 404 - if r.NotFound != nil { - r.NotFound.ServeHTTP(w, req) - } else { - http.NotFound(w, req) - } -} diff --git a/vendor/github.com/julienschmidt/httprouter/tree.go b/vendor/github.com/julienschmidt/httprouter/tree.go deleted file mode 100644 index 474da00..0000000 --- a/vendor/github.com/julienschmidt/httprouter/tree.go +++ /dev/null @@ -1,651 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package httprouter - -import ( - "strings" - "unicode" - "unicode/utf8" -) - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -func countParams(path string) uint8 { - var n uint - for i := 0; i < len(path); i++ { - if path[i] != ':' && path[i] != '*' { - continue - } - n++ - } - if n >= 255 { - return 255 - } - return uint8(n) -} - -type nodeType uint8 - -const ( - static nodeType = iota // default - root - param - catchAll -) - -type node struct { - path string - wildChild bool - nType nodeType - maxParams uint8 - indices string - children []*node - handle Handle - priority uint32 -} - -// increments priority of the given child and reorders if necessary -func (n *node) incrementChildPrio(pos int) int { - n.children[pos].priority++ - prio := n.children[pos].priority - - // adjust position (move to front) - newPos := pos - for newPos > 0 && n.children[newPos-1].priority < prio { - // swap node positions - n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1] - - newPos-- - } - - // build new index char string - if newPos != pos { - n.indices = n.indices[:newPos] + // unchanged prefix, might be empty - n.indices[pos:pos+1] + // the index char we move - n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' - } - - return newPos -} - -// addRoute adds a node with the given handle to the path. -// Not concurrency-safe! -func (n *node) addRoute(path string, handle Handle) { - fullPath := path - n.priority++ - numParams := countParams(path) - - // non-empty tree - if len(n.path) > 0 || len(n.children) > 0 { - walk: - for { - // Update maxParams of the current node - if numParams > n.maxParams { - n.maxParams = numParams - } - - // Find the longest common prefix. - // This also implies that the common prefix contains no ':' or '*' - // since the existing key can't contain those chars. 
- i := 0 - max := min(len(path), len(n.path)) - for i < max && path[i] == n.path[i] { - i++ - } - - // Split edge - if i < len(n.path) { - child := node{ - path: n.path[i:], - wildChild: n.wildChild, - nType: static, - indices: n.indices, - children: n.children, - handle: n.handle, - priority: n.priority - 1, - } - - // Update maxParams (max of all children) - for i := range child.children { - if child.children[i].maxParams > child.maxParams { - child.maxParams = child.children[i].maxParams - } - } - - n.children = []*node{&child} - // []byte for proper unicode char conversion, see #65 - n.indices = string([]byte{n.path[i]}) - n.path = path[:i] - n.handle = nil - n.wildChild = false - } - - // Make new node a child of this node - if i < len(path) { - path = path[i:] - - if n.wildChild { - n = n.children[0] - n.priority++ - - // Update maxParams of the child node - if numParams > n.maxParams { - n.maxParams = numParams - } - numParams-- - - // Check if the wildcard matches - if len(path) >= len(n.path) && n.path == path[:len(n.path)] && - // Check for longer wildcard, e.g. :name and :names - (len(n.path) >= len(path) || path[len(n.path)] == '/') { - continue walk - } else { - // Wildcard conflict - pathSeg := strings.SplitN(path, "/", 2)[0] - prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path - panic("'" + pathSeg + - "' in new path '" + fullPath + - "' conflicts with existing wildcard '" + n.path + - "' in existing prefix '" + prefix + - "'") - } - } - - c := path[0] - - // slash after param - if n.nType == param && c == '/' && len(n.children) == 1 { - n = n.children[0] - n.priority++ - continue walk - } - - // Check if a child with the next path byte exists - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - i = n.incrementChildPrio(i) - n = n.children[i] - continue walk - } - } - - // Otherwise insert it - if c != ':' && c != '*' { - // []byte for proper unicode char conversion, see #65 - n.indices += string([]byte{c}) - child := &node{ - maxParams: numParams, - } - n.children = append(n.children, child) - n.incrementChildPrio(len(n.indices) - 1) - n = child - } - n.insertChild(numParams, path, fullPath, handle) - return - - } else if i == len(path) { // Make node a (in-path) leaf - if n.handle != nil { - panic("a handle is already registered for path '" + fullPath + "'") - } - n.handle = handle - } - return - } - } else { // Empty tree - n.insertChild(numParams, path, fullPath, handle) - n.nType = root - } -} - -func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) { - var offset int // already handled bytes of the path - - // find prefix until first wildcard (beginning with ':'' or '*'') - for i, max := 0, len(path); numParams > 0; i++ { - c := path[i] - if c != ':' && c != '*' { - continue - } - - // find wildcard end (either '/' or path end) - end := i + 1 - for end < max && path[end] != '/' { - switch path[end] { - // the wildcard name must not contain ':' and '*' - case ':', '*': - panic("only one wildcard per path segment is allowed, has: '" + - path[i:] + "' in path '" + fullPath + "'") - default: - end++ - } - } - - // check if this Node existing children which would be - // unreachable if we insert the wildcard here - if len(n.children) > 0 { - panic("wildcard route '" + path[i:end] + - "' conflicts with existing children in path '" + fullPath + "'") - } - - // check if the wildcard has a name - if end-i < 2 { - panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") - } - - if c == ':' { // param 
- // split path at the beginning of the wildcard - if i > 0 { - n.path = path[offset:i] - offset = i - } - - child := &node{ - nType: param, - maxParams: numParams, - } - n.children = []*node{child} - n.wildChild = true - n = child - n.priority++ - numParams-- - - // if the path doesn't end with the wildcard, then there - // will be another non-wildcard subpath starting with '/' - if end < max { - n.path = path[offset:end] - offset = end - - child := &node{ - maxParams: numParams, - priority: 1, - } - n.children = []*node{child} - n = child - } - - } else { // catchAll - if end != max || numParams > 1 { - panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") - } - - if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { - panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") - } - - // currently fixed width 1 for '/' - i-- - if path[i] != '/' { - panic("no / before catch-all in path '" + fullPath + "'") - } - - n.path = path[offset:i] - - // first node: catchAll node with empty path - child := &node{ - wildChild: true, - nType: catchAll, - maxParams: 1, - } - n.children = []*node{child} - n.indices = string(path[i]) - n = child - n.priority++ - - // second node: node holding the variable - child = &node{ - path: path[i:], - nType: catchAll, - maxParams: 1, - handle: handle, - priority: 1, - } - n.children = []*node{child} - - return - } - } - - // insert remaining path part and handle to the leaf - n.path = path[offset:] - n.handle = handle -} - -// Returns the handle registered with the given path (key). The values of -// wildcards are saved to a map. -// If no handle can be found, a TSR (trailing slash redirect) recommendation is -// made if a handle exists with an extra (without the) trailing slash for the -// given path. -func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { -walk: // outer loop for walking the tree - for { - if len(path) > len(n.path) { - if path[:len(n.path)] == n.path { - path = path[len(n.path):] - // If this node does not have a wildcard (param or catchAll) - // child, we can just look up the next child node and continue - // to walk down the tree - if !n.wildChild { - c := path[0] - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - n = n.children[i] - continue walk - } - } - - // Nothing found. - // We can recommend to redirect to the same URL without a - // trailing slash if a leaf exists for that path. - tsr = (path == "/" && n.handle != nil) - return - - } - - // handle wildcard child - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - end := 0 - for end < len(path) && path[end] != '/' { - end++ - } - - // save param value - if p == nil { - // lazy allocation - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[1:] - p[i].Value = path[:end] - - // we need to go deeper! - if end < len(path) { - if len(n.children) > 0 { - path = path[end:] - n = n.children[0] - continue walk - } - - // ... but we can't - tsr = (len(path) == end+1) - return - } - - if handle = n.handle; handle != nil { - return - } else if len(n.children) == 1 { - // No handle found. 
Check if a handle for this path + a - // trailing slash exists for TSR recommendation - n = n.children[0] - tsr = (n.path == "/" && n.handle != nil) - } - - return - - case catchAll: - // save param value - if p == nil { - // lazy allocation - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[2:] - p[i].Value = path - - handle = n.handle - return - - default: - panic("invalid node type") - } - } - } else if path == n.path { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if handle = n.handle; handle != nil { - return - } - - if path == "/" && n.wildChild && n.nType != root { - tsr = true - return - } - - // No handle found. Check if a handle for this path + a - // trailing slash exists for trailing slash recommendation - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - tsr = (len(n.path) == 1 && n.handle != nil) || - (n.nType == catchAll && n.children[0].handle != nil) - return - } - } - - return - } - - // Nothing found. We can recommend to redirect to the same URL with an - // extra trailing slash if a leaf exists for that path - tsr = (path == "/") || - (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && - path == n.path[:len(n.path)-1] && n.handle != nil) - return - } -} - -// Makes a case-insensitive lookup of the given path and tries to find a handler. -// It can optionally also fix trailing slashes. -// It returns the case-corrected path and a bool indicating whether the lookup -// was successful. -func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { - return n.findCaseInsensitivePathRec( - path, - strings.ToLower(path), - make([]byte, 0, len(path)+1), // preallocate enough memory for new path - [4]byte{}, // empty rune buffer - fixTrailingSlash, - ) -} - -// shift bytes in array by n bytes left -func shiftNRuneBytes(rb [4]byte, n int) [4]byte { - switch n { - case 0: - return rb - case 1: - return [4]byte{rb[1], rb[2], rb[3], 0} - case 2: - return [4]byte{rb[2], rb[3]} - case 3: - return [4]byte{rb[3]} - default: - return [4]byte{} - } -} - -// recursive case-insensitive lookup function used by n.findCaseInsensitivePath -func (n *node) findCaseInsensitivePathRec(path, loPath string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) ([]byte, bool) { - loNPath := strings.ToLower(n.path) - -walk: // outer loop for walking the tree - for len(loPath) >= len(loNPath) && (len(loNPath) == 0 || loPath[1:len(loNPath)] == loNPath[1:]) { - // add common path to result - ciPath = append(ciPath, n.path...) 
- - if path = path[len(n.path):]; len(path) > 0 { - loOld := loPath - loPath = loPath[len(loNPath):] - - // If this node does not have a wildcard (param or catchAll) child, - // we can just look up the next child node and continue to walk down - // the tree - if !n.wildChild { - // skip rune bytes already processed - rb = shiftNRuneBytes(rb, len(loNPath)) - - if rb[0] != 0 { - // old rune not finished - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == rb[0] { - // continue with child node - n = n.children[i] - loNPath = strings.ToLower(n.path) - continue walk - } - } - } else { - // process a new rune - var rv rune - - // find rune start - // runes are up to 4 byte long, - // -4 would definitely be another rune - var off int - for max := min(len(loNPath), 3); off < max; off++ { - if i := len(loNPath) - off; utf8.RuneStart(loOld[i]) { - // read rune from cached lowercase path - rv, _ = utf8.DecodeRuneInString(loOld[i:]) - break - } - } - - // calculate lowercase bytes of current rune - utf8.EncodeRune(rb[:], rv) - // skipp already processed bytes - rb = shiftNRuneBytes(rb, off) - - for i := 0; i < len(n.indices); i++ { - // lowercase matches - if n.indices[i] == rb[0] { - // must use a recursive approach since both the - // uppercase byte and the lowercase byte might exist - // as an index - if out, found := n.children[i].findCaseInsensitivePathRec( - path, loPath, ciPath, rb, fixTrailingSlash, - ); found { - return out, true - } - break - } - } - - // same for uppercase rune, if it differs - if up := unicode.ToUpper(rv); up != rv { - utf8.EncodeRune(rb[:], up) - rb = shiftNRuneBytes(rb, off) - - for i := 0; i < len(n.indices); i++ { - // uppercase matches - if n.indices[i] == rb[0] { - // continue with child node - n = n.children[i] - loNPath = strings.ToLower(n.path) - continue walk - } - } - } - } - - // Nothing found. We can recommend to redirect to the same URL - // without a trailing slash if a leaf exists for that path - return ciPath, (fixTrailingSlash && path == "/" && n.handle != nil) - } - - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - k := 0 - for k < len(path) && path[k] != '/' { - k++ - } - - // add param value to case insensitive path - ciPath = append(ciPath, path[:k]...) - - // we need to go deeper! - if k < len(path) { - if len(n.children) > 0 { - // continue with child node - n = n.children[0] - loNPath = strings.ToLower(n.path) - loPath = loPath[k:] - path = path[k:] - continue - } - - // ... but we can't - if fixTrailingSlash && len(path) == k+1 { - return ciPath, true - } - return ciPath, false - } - - if n.handle != nil { - return ciPath, true - } else if fixTrailingSlash && len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists - n = n.children[0] - if n.path == "/" && n.handle != nil { - return append(ciPath, '/'), true - } - } - return ciPath, false - - case catchAll: - return append(ciPath, path...), true - - default: - panic("invalid node type") - } - } else { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if n.handle != nil { - return ciPath, true - } - - // No handle found. 
- // Try to fix the path by adding a trailing slash - if fixTrailingSlash { - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - if (len(n.path) == 1 && n.handle != nil) || - (n.nType == catchAll && n.children[0].handle != nil) { - return append(ciPath, '/'), true - } - return ciPath, false - } - } - } - return ciPath, false - } - } - - // Nothing found. - // Try to fix the path by adding / removing a trailing slash - if fixTrailingSlash { - if path == "/" { - return ciPath, true - } - if len(loPath)+1 == len(loNPath) && loNPath[len(loPath)] == '/' && - loPath[1:] == loNPath[1:len(loPath)] && n.handle != nil { - return append(ciPath, n.path...), true - } - } - return ciPath, false -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index bf3eed6..0000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
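The router tree removed above (getValue, findCaseInsensitivePath, shiftNRuneBytes) is the vendored julienschmidt/httprouter lookup trie: `:param` and `*catchAll` segments are filled into a `Params` slice, the `tsr` flag signals a trailing-slash redirect, and the case-insensitive walk backs the router's path-fixing redirect. A minimal sketch of how that surfaces through the public httprouter API; the routes and port here are invented for illustration.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/julienschmidt/httprouter"
)

// hello handles /hello/:name; ":name" is a param node in the tree, so
// getValue fills it into the Params slice during the walk.
func hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	fmt.Fprintf(w, "hello, %s\n", ps.ByName("name"))
}

// files handles /static/*filepath; "*filepath" is a catchAll node, so the
// rest of the URL arrives as a single Param value.
func files(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	fmt.Fprintf(w, "would serve %s\n", ps.ByName("filepath"))
}

func main() {
	router := httprouter.New()

	// These options are what consume the tsr flag and the
	// findCaseInsensitivePath result computed in tree.go.
	router.RedirectTrailingSlash = true
	router.RedirectFixedPath = true

	router.GET("/hello/:name", hello)
	router.GET("/static/*filepath", files)

	log.Fatal(http.ListenAndServe(":8080", router))
}
```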
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 273db3c..0000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. - -## Licence - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... 
- -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 842ee80..0000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,269 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// and the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required the errors.WithStack and errors.WithMessage -// functions destructure errors.Wrap into its component operations of annotating -// an error with a stack trace and an a message, respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error which does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// causer interface is not exported by this package, but is considered a part -// of stable public API. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported -// -// %s print the error. If the error has a Cause it will be -// printed recursively -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface. -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// Where errors.StackTrace is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// stackTracer interface is not exported by this package, but is considered a part -// of stable public API. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. 
-// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is call, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. 
-func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index cbe3f3e..0000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,186 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s path of source file relative to the compile time GOPATH -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. 
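Together, the errors.go and stack.go files being deleted make up the vendored pkg/errors package: Wrap/Wrapf attach a message plus a captured stack, Cause unwraps back to the original error, and Frame/StackTrace drive the `%+v` formatting. A minimal usage sketch, assuming only the exported API shown in this diff; the path and message are placeholders.

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// readConfig is a stand-in operation that fails for a missing path.
func readConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Wrap records a stack trace (via callers()) and prepends a message.
		return errors.Wrap(err, "read config failed")
	}
	f.Close()
	return nil
}

func main() {
	err := readConfig("/no/such/file")

	// Cause peels off the withStack/withMessage layers and returns the
	// original *os.PathError.
	fmt.Println("cause:", errors.Cause(err))

	// %v prints the chained messages; %+v additionally prints each Frame
	// via the StackTrace.Format implementation above.
	fmt.Printf("%v\n", err)
	fmt.Printf("%+v\n", err)
}
```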
-type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} - -func trimGOPATH(name, file string) string { - // Here we want to get the source file path relative to the compile time - // GOPATH. As of Go 1.6.x there is no direct way to know the compiled - // GOPATH at runtime, but we can infer the number of path segments in the - // GOPATH. We note that fn.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired output. We count separators from the end of the file - // path until it finds two more than in the function name and then move - // one character forward to preserve the initial path segment without a - // leading separator. - const sep = "/" - goal := strings.Count(name, sep) + 2 - i := len(file) - for n := 0; n < goal; n++ { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - // not enough separators found, set i so that the slice expression - // below leaves file unmodified - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - file = file[i+len(sep):] - return file -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad6..0000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99f..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
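GetMatchingBlocks and GetOpCodes are the core of the vendored go-difflib SequenceMatcher: the first returns the (i, j, n) matching triples ending in the {len(a), len(b), 0} sentinel, the second turns them into 'r'/'d'/'i'/'e' edit operations. A small sketch of driving both, with made-up input lines:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one", "two", "three", "four"}
	b := []string{"zero", "one", "three", "four"}

	m := difflib.NewMatcher(a, b)

	// Matching triples: Match{A, B, Size} means a[A:A+Size] == b[B:B+Size];
	// the list ends with the {len(a), len(b), 0} sentinel.
	for _, blk := range m.GetMatchingBlocks() {
		fmt.Printf("match a[%d:%d] b[%d:%d]\n",
			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
	}

	// Edit script: 'r' replace, 'd' delete, 'i' insert, 'e' equal.
	for _, op := range m.GetOpCodes() {
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}
```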
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
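Ratio, QuickRatio and RealQuickRatio form a chain of increasingly cheap upper bounds on the similarity score 2*M/T. A hedged sketch with arbitrary inputs, meant to show the ordering rather than any particular values:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"apple", "banana", "cherry"}
	b := []string{"apple", "banana", "date"}

	m := difflib.NewMatcher(a, b)

	// Each successive value is an upper bound on the one before it and is
	// cheaper to compute: Ratio() <= QuickRatio() <= RealQuickRatio().
	fmt.Printf("Ratio:          %.3f\n", m.Ratio())
	fmt.Printf("QuickRatio:     %.3f\n", m.QuickRatio())
	fmt.Printf("RealQuickRatio: %.3f\n", m.RealQuickRatio())
}
```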
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
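WriteUnifiedDiff and its GetUnifiedDiffString wrapper take a UnifiedDiff value whose A/B fields are already split into lines (SplitLines, defined later in this file, keeps the trailing newlines the writer expects). A minimal sketch; the file names and contents are invented:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("one\ntwo\nthree\n"),
		B:        difflib.SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}

	// GetUnifiedDiffString runs WriteUnifiedDiff into a buffer and returns
	// the usual ---/+++/@@ formatted text.
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(text)
}
```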
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
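WriteContextDiff is the context-format counterpart: ContextDiff shares UnifiedDiff's fields, and the writer can stream straight to any io.Writer instead of going through a string. A short sketch, again with placeholder data:

```go
package main

import (
	"log"
	"os"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.ContextDiff{
		A:        []string{"alpha\n", "beta\n", "gamma\n"},
		B:        []string{"alpha\n", "BETA\n", "gamma\n"},
		FromFile: "old",
		ToFile:   "new",
		Context:  1,
	}

	// WriteContextDiff streams the "***" / "---" hunks to any io.Writer;
	// GetContextDiffString is just this plus a bytes.Buffer.
	if err := difflib.WriteContextDiff(os.Stdout, diff); err != nil {
		log.Fatal(err)
	}
}
```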
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/rakyll/statik/LICENSE b/vendor/github.com/rakyll/statik/LICENSE deleted file mode 100644 index a4c5efd..0000000 --- a/vendor/github.com/rakyll/statik/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/rakyll/statik/fs/fs.go b/vendor/github.com/rakyll/statik/fs/fs.go deleted file mode 100644 index e328d5a..0000000 --- a/vendor/github.com/rakyll/statik/fs/fs.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package contains an HTTP file system that works with zip contents. -package fs - -import ( - "archive/zip" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - "sync" -) - -var zipData string - -type statikFS struct { - files map[string]*zip.File -} - -// Registers zip contents data, later used to initialize -// the statik file system. -func Register(data string) { - zipData = data -} - -// Creates a new file system with the registered zip contents data. -func New() (http.FileSystem, error) { - if zipData == "" { - return nil, errors.New("statik/fs: No zip data registered.") - } - zipReader, err := zip.NewReader(strings.NewReader(zipData), int64(len(zipData))) - if err != nil { - return nil, err - } - files := make(map[string]*zip.File) - for _, file := range zipReader.File { - files["/"+file.Name] = file - } - return &statikFS{files: files}, nil -} - -// Opens a file, unzip the contents and initializes -// readers. Returns os.ErrNotExists if file is not -// found in the archive. -func (fs *statikFS) Open(name string) (http.File, error) { - name = strings.Replace(name, "//", "/", -1) - f, ok := fs.files[name] - - // The file doesn't match, but maybe it's a directory, - // thus we should look for index.html - if !ok { - indexName := strings.Replace(name+"/index.html", "//", "/", -1) - f, ok = fs.files[indexName] - - if !ok { - return nil, os.ErrNotExist - } - - return newFile(f, true) - } - return newFile(f, false) -} - -var nopCloser = ioutil.NopCloser(nil) - -func newFile(zf *zip.File, isDir bool) (*file, error) { - rc, err := zf.Open() - if err != nil { - return nil, err - } - defer rc.Close() - all, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - return &file{ - FileInfo: zf.FileInfo(), - data: all, - readerAt: bytes.NewReader(all), - Closer: nopCloser, - isDir: isDir, - }, nil -} - -// Represents an HTTP file, acts as a bridge between -// zip.File and http.File. -type file struct { - os.FileInfo - io.Closer - - data []byte // non-nil if regular file - reader *io.SectionReader - readerAt io.ReaderAt // over data - isDir bool - - once sync.Once -} - -func (f *file) newReader() { - f.reader = io.NewSectionReader(f.readerAt, 0, f.FileInfo.Size()) -} - -// Reads bytes into p, returns the number of read bytes. -func (f *file) Read(p []byte) (n int, err error) { - f.once.Do(f.newReader) - return f.reader.Read(p) -} - -// Seeks to the offset. -func (f *file) Seek(offset int64, whence int) (ret int64, err error) { - f.once.Do(f.newReader) - return f.reader.Seek(offset, whence) -} - -// Stats the file. -func (f *file) Stat() (os.FileInfo, error) { - return f, nil -} - -// IsDir returns true if the file location represents a directory. -func (f *file) IsDir() bool { - return f.isDir -} - -// Returns an empty slice of files, directory -// listing is disabled. -func (f *file) Readdir(count int) ([]os.FileInfo, error) { - // directory listing is disabled. 
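The statik fs package deleted here is what serves the embedded dashboard assets: a generated package calls Register with zip data at init time, New wraps that data in an http.FileSystem, and Open unzips files into memory (falling back to index.html for directory requests). A minimal sketch of wiring it to net/http; the commented import path is hypothetical and stands in for whatever package the statik tool generated:

```go
package main

import (
	"log"
	"net/http"

	"github.com/rakyll/statik/fs"
	// A real program also blank-imports the package generated by the statik
	// CLI, whose init() calls fs.Register with the embedded zip data, e.g.:
	//   _ "example.com/myproject/statik"   // hypothetical path
)

func main() {
	// New returns an error if nothing has registered zip data yet, so the
	// blank import above (or an explicit fs.Register call) must run first.
	statikFS, err := fs.New()
	if err != nil {
		log.Fatal(err)
	}

	// statikFS satisfies http.FileSystem: Open unzips each file into memory
	// and serves it like a normal static directory (listing disabled).
	http.Handle("/", http.FileServer(statikFS))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```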
- return make([]os.FileInfo, 0), nil -} diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/testify/LICENCE.txt deleted file mode 100644 index 473b670..0000000 --- a/vendor/github.com/stretchr/testify/LICENCE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 473b670..0000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index aa4311f..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,352 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - return Condition(a.t, comp, msgAndArgs...) 
-} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - return Contains(a.t, s, contains, msgAndArgs...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - return Empty(a.t, object, msgAndArgs...) -} - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString, "An error was expected") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - return Error(a.t, err, msgAndArgs...) -} - -// Exactly asserts that two objects are equal is value and type. -// -// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - return Fail(a.t, failureMessage, msgAndArgs...) -} - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - return FailNow(a.t, failureMessage, msgAndArgs...) -} - -// False asserts that the specified value is false. 
-// -// a.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - return False(a.t, value, msgAndArgs...) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, method, url, values, str) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, method, url, values, str) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPError(a.t, handler, method, url, values) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, method, url, values) -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, method, url, values) -} - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) 
-} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - return Len(a.t, object, length, msgAndArgs...) -} - -// Nil asserts that the specified object is nil. -// -// a.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - return Nil(a.t, object, msgAndArgs...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - return NoError(a.t, err, msgAndArgs...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEqual asserts that the specified values are NOT equal. 
-// -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - return NotNil(a.t, object, msgAndArgs...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - return NotZero(a.t, i, msgAndArgs...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return Panics(a.t, f, msgAndArgs...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - return True(a.t, value, msgAndArgs...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// Zero asserts that i is the zero value for its type and returns the truth. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - return Zero(a.t, i, msgAndArgs...) 
-} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 99f9acf..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,4 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index b33045d..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1069 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "math" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - - if expected == nil || actual == nil { - return expected == actual - } - - return reflect.DeepEqual(expected, actual) - -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - dir := parts[len(parts)-2] - file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -// getWhitespaceString returns a string that is long enough to overwrite the default -// output from the go testing framework. -func getWhitespaceString() string { - - _, file, line, ok := runtime.Caller(1) - if !ok { - return "" - } - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) - -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). -func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")}, - {"Error", failureMessage}, - } - - message := messageFromMsgAndArgs(msgAndArgs...) 
- if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\r" + getWhitespaceString() + labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: -// -// \r\t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. -// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true - -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "received: %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. 
-func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%#v)", expected, expected), - fmt.Sprintf("%T(%#v)", actual, actual) - } - - return fmt.Sprintf("%#v", expected), - fmt.Sprintf("%#v", actual) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "received: %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -var numericZeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - -// isEmpty gets whether the specified object is considered empty or not. 
-func isEmpty(object interface{}) bool { - - if object == nil { - return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range numericZeros { - if object == v { - return true - } - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Struct: - switch object.(type) { - case time.Time: - return object.(time.Time).IsZero() - } - case reflect.Ptr: - { - if objValue.IsNil() { - return true - } - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } - } - } - return false -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). 
-// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if reflect.TypeOf(list).Kind() == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) 
- } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) 
- } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) - if !result { - return result - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). 
-func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - - if err == nil { - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString, "An error was expected") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if !Error(t, theError, msgAndArgs...) { - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "received: %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type and returns the truth. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). 
-func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { - return "" - } - - e := spewConfig.Sdump(expected) - a := spewConfig.Sdump(actual) - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. -// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. 
-package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index b867e95..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index fa7ab89..0000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,106 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return -1 - } - handler(w, req) - return w.Code -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusOK && code <= http.StatusPartialContent -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusBadRequest -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/github.com/vaughan0/go-ini/LICENSE b/vendor/github.com/vaughan0/go-ini/LICENSE deleted file mode 100644 index 968b453..0000000 --- a/vendor/github.com/vaughan0/go-ini/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2013 Vaughan Newton - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/vaughan0/go-ini/README.md b/vendor/github.com/vaughan0/go-ini/README.md deleted file mode 100644 index d5cd4e7..0000000 --- a/vendor/github.com/vaughan0/go-ini/README.md +++ /dev/null @@ -1,70 +0,0 @@ -go-ini -====== - -INI parsing library for Go (golang). - -View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini). - -Usage ------ - -Parse an INI file: - -```go -import "github.com/vaughan0/go-ini" - -file, err := ini.LoadFile("myfile.ini") -``` - -Get data from the parsed file: - -```go -name, ok := file.Get("person", "name") -if !ok { - panic("'name' variable missing from 'person' section") -} -``` - -Iterate through values in a section: - -```go -for key, value := range file["mysection"] { - fmt.Printf("%s => %s\n", key, value) -} -``` - -Iterate through sections in a file: - -```go -for name, section := range file { - fmt.Printf("Section name: %s\n", name) -} -``` - -File Format ------------ - -INI files are parsed by go-ini line-by-line. Each line may be one of the following: - - * A section definition: [section-name] - * A property: key = value - * A comment: #blahblah _or_ ;blahblah - * Blank. The line will be ignored. - -Properties defined before any section headers are placed in the default section, which has -the empty string as it's key. - -Example: - -```ini -# I am a comment -; So am I! - -[apples] -colour = red or green -shape = applish - -[oranges] -shape = square -colour = blue -``` diff --git a/vendor/github.com/vaughan0/go-ini/ini.go b/vendor/github.com/vaughan0/go-ini/ini.go deleted file mode 100644 index 81aeb32..0000000 --- a/vendor/github.com/vaughan0/go-ini/ini.go +++ /dev/null @@ -1,123 +0,0 @@ -// Package ini provides functions for parsing INI configuration files. -package ini - -import ( - "bufio" - "fmt" - "io" - "os" - "regexp" - "strings" -) - -var ( - sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) - assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) -) - -// ErrSyntax is returned when there is a syntax error in an INI file. -type ErrSyntax struct { - Line int - Source string // The contents of the erroneous line, without leading or trailing whitespace -} - -func (e ErrSyntax) Error() string { - return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) -} - -// A File represents a parsed INI file. -type File map[string]Section - -// A Section represents a single section of an INI file. -type Section map[string]string - -// Returns a named Section. A Section will be created if one does not already exist for the given name. -func (f File) Section(name string) Section { - section := f[name] - if section == nil { - section = make(Section) - f[name] = section - } - return section -} - -// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. -func (f File) Get(section, key string) (value string, ok bool) { - if s := f[section]; s != nil { - value, ok = s[key] - } - return -} - -// Loads INI data from a reader and stores the data in the File. -func (f File) Load(in io.Reader) (err error) { - bufin, ok := in.(*bufio.Reader) - if !ok { - bufin = bufio.NewReader(in) - } - return parseFile(bufin, f) -} - -// Loads INI data from a named file and stores the data in the File. 
-func (f File) LoadFile(file string) (err error) { - in, err := os.Open(file) - if err != nil { - return - } - defer in.Close() - return f.Load(in) -} - -func parseFile(in *bufio.Reader, file File) (err error) { - section := "" - lineNum := 0 - for done := false; !done; { - var line string - if line, err = in.ReadString('\n'); err != nil { - if err == io.EOF { - done = true - } else { - return - } - } - lineNum++ - line = strings.TrimSpace(line) - if len(line) == 0 { - // Skip blank lines - continue - } - if line[0] == ';' || line[0] == '#' { - // Skip comments - continue - } - - if groups := assignRegex.FindStringSubmatch(line); groups != nil { - key, val := groups[1], groups[2] - key, val = strings.TrimSpace(key), strings.TrimSpace(val) - file.Section(section)[key] = val - } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { - name := strings.TrimSpace(groups[1]) - section = name - // Create the section if it does not exist - file.Section(section) - } else { - return ErrSyntax{lineNum, line} - } - - } - return nil -} - -// Loads and returns a File from a reader. -func Load(in io.Reader) (File, error) { - file := make(File) - err := file.Load(in) - return file, err -} - -// Loads and returns an INI File from a file on disk. -func LoadFile(filename string) (File, error) { - file := make(File) - err := file.LoadFile(filename) - return file, err -} diff --git a/vendor/github.com/vaughan0/go-ini/test.ini b/vendor/github.com/vaughan0/go-ini/test.ini deleted file mode 100644 index d13c999..0000000 --- a/vendor/github.com/vaughan0/go-ini/test.ini +++ /dev/null @@ -1,2 +0,0 @@ -[default] -stuff = things diff --git a/vendor/github.com/xtaci/smux/.gitignore b/vendor/github.com/xtaci/smux/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/xtaci/smux/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/xtaci/smux/.travis.yml b/vendor/github.com/xtaci/smux/.travis.yml deleted file mode 100644 index 1ad083c..0000000 --- a/vendor/github.com/xtaci/smux/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - tip - -before_install: - - go get -t -v ./... - -install: - - go get github.com/xtaci/smux - -script: - - go test -coverprofile=coverage.txt -covermode=atomic -bench . - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/xtaci/smux/LICENSE b/vendor/github.com/xtaci/smux/LICENSE deleted file mode 100644 index eed41ac..0000000 --- a/vendor/github.com/xtaci/smux/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016-2017 Daniel Fu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/xtaci/smux/README.md b/vendor/github.com/xtaci/smux/README.md deleted file mode 100644 index c01c346..0000000 --- a/vendor/github.com/xtaci/smux/README.md +++ /dev/null @@ -1,99 +0,0 @@ -smux - -[![GoDoc][1]][2] [![MIT licensed][3]][4] [![Build Status][5]][6] [![Go Report Card][7]][8] [![Coverage Statusd][9]][10] - -smux - -[1]: https://godoc.org/github.com/xtaci/smux?status.svg -[2]: https://godoc.org/github.com/xtaci/smux -[3]: https://img.shields.io/badge/license-MIT-blue.svg -[4]: LICENSE -[5]: https://travis-ci.org/xtaci/smux.svg?branch=master -[6]: https://travis-ci.org/xtaci/smux -[7]: https://goreportcard.com/badge/github.com/xtaci/smux -[8]: https://goreportcard.com/report/github.com/xtaci/smux -[9]: https://codecov.io/gh/xtaci/smux/branch/master/graph/badge.svg -[10]: https://codecov.io/gh/xtaci/smux - -## Introduction - -Smux ( **S**imple **MU**ltiple**X**ing) is a multiplexing library for Golang. It relies on an underlying connection to provide reliability and ordering, such as TCP or [KCP](https://github.com/xtaci/kcp-go), and provides stream-oriented multiplexing. The original intention of this library is to power the connection management for [kcp-go](https://github.com/xtaci/kcp-go). - -## Features - -1. Tiny, less than 600 LOC. -2. ***Token bucket*** controlled receiving, which provides smoother bandwidth graph(see picture below). -3. Session-wide receive buffer, shared among streams, tightly controlled overall memory usage. -4. Minimized header(8Bytes), maximized payload. -5. Well-tested on millions of devices in [kcptun](https://github.com/xtaci/kcptun). - -![smooth bandwidth curve](curve.jpg) - -## Documentation - -For complete documentation, see the associated [Godoc](https://godoc.org/github.com/xtaci/smux). - -## Specification - -``` -VERSION(1B) | CMD(1B) | LENGTH(2B) | STREAMID(4B) | DATA(LENGTH) -``` - -## Usage - -The API of smux are mostly taken from [yamux](https://github.com/hashicorp/yamux) - -```go - -func client() { - // Get a TCP connection - conn, err := net.Dial(...) 
- if err != nil { - panic(err) - } - - // Setup client side of smux - session, err := smux.Client(conn, nil) - if err != nil { - panic(err) - } - - // Open a new stream - stream, err := session.OpenStream() - if err != nil { - panic(err) - } - - // Stream implements io.ReadWriteCloser - stream.Write([]byte("ping")) -} - -func server() { - // Accept a TCP connection - conn, err := listener.Accept() - if err != nil { - panic(err) - } - - // Setup server side of smux - session, err := smux.Server(conn, nil) - if err != nil { - panic(err) - } - - // Accept a stream - stream, err := session.AcceptStream() - if err != nil { - panic(err) - } - - // Listen for a message - buf := make([]byte, 4) - stream.Read(buf) -} - -``` - -## Status - -Stable diff --git a/vendor/github.com/xtaci/smux/curve.jpg b/vendor/github.com/xtaci/smux/curve.jpg deleted file mode 100644 index 3fc4863..0000000 Binary files a/vendor/github.com/xtaci/smux/curve.jpg and /dev/null differ diff --git a/vendor/github.com/xtaci/smux/frame.go b/vendor/github.com/xtaci/smux/frame.go deleted file mode 100644 index 36062d7..0000000 --- a/vendor/github.com/xtaci/smux/frame.go +++ /dev/null @@ -1,60 +0,0 @@ -package smux - -import ( - "encoding/binary" - "fmt" -) - -const ( - version = 1 -) - -const ( // cmds - cmdSYN byte = iota // stream open - cmdFIN // stream close, a.k.a EOF mark - cmdPSH // data push - cmdNOP // no operation -) - -const ( - sizeOfVer = 1 - sizeOfCmd = 1 - sizeOfLength = 2 - sizeOfSid = 4 - headerSize = sizeOfVer + sizeOfCmd + sizeOfSid + sizeOfLength -) - -// Frame defines a packet from or to be multiplexed into a single connection -type Frame struct { - ver byte - cmd byte - sid uint32 - data []byte -} - -func newFrame(cmd byte, sid uint32) Frame { - return Frame{ver: version, cmd: cmd, sid: sid} -} - -type rawHeader []byte - -func (h rawHeader) Version() byte { - return h[0] -} - -func (h rawHeader) Cmd() byte { - return h[1] -} - -func (h rawHeader) Length() uint16 { - return binary.LittleEndian.Uint16(h[2:]) -} - -func (h rawHeader) StreamID() uint32 { - return binary.LittleEndian.Uint32(h[4:]) -} - -func (h rawHeader) String() string { - return fmt.Sprintf("Version:%d Cmd:%d StreamID:%d Length:%d", - h.Version(), h.Cmd(), h.StreamID(), h.Length()) -} diff --git a/vendor/github.com/xtaci/smux/mux.go b/vendor/github.com/xtaci/smux/mux.go deleted file mode 100644 index afcf58b..0000000 --- a/vendor/github.com/xtaci/smux/mux.go +++ /dev/null @@ -1,80 +0,0 @@ -package smux - -import ( - "fmt" - "io" - "time" - - "github.com/pkg/errors" -) - -// Config is used to tune the Smux session -type Config struct { - // KeepAliveInterval is how often to send a NOP command to the remote - KeepAliveInterval time.Duration - - // KeepAliveTimeout is how long the session - // will be closed if no data has arrived - KeepAliveTimeout time.Duration - - // MaxFrameSize is used to control the maximum - // frame size to sent to the remote - MaxFrameSize int - - // MaxReceiveBuffer is used to control the maximum - // number of data in the buffer pool - MaxReceiveBuffer int -} - -// DefaultConfig is used to return a default configuration -func DefaultConfig() *Config { - return &Config{ - KeepAliveInterval: 10 * time.Second, - KeepAliveTimeout: 30 * time.Second, - MaxFrameSize: 4096, - MaxReceiveBuffer: 4194304, - } -} - -// VerifyConfig is used to verify the sanity of configuration -func VerifyConfig(config *Config) error { - if config.KeepAliveInterval == 0 { - return errors.New("keep-alive interval must be positive") - } - if 
config.KeepAliveTimeout < config.KeepAliveInterval { - return fmt.Errorf("keep-alive timeout must be larger than keep-alive interval") - } - if config.MaxFrameSize <= 0 { - return errors.New("max frame size must be positive") - } - if config.MaxFrameSize > 65535 { - return errors.New("max frame size must not be larger than 65535") - } - if config.MaxReceiveBuffer <= 0 { - return errors.New("max receive buffer must be positive") - } - return nil -} - -// Server is used to initialize a new server-side connection. -func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { - if config == nil { - config = DefaultConfig() - } - if err := VerifyConfig(config); err != nil { - return nil, err - } - return newSession(config, conn, false), nil -} - -// Client is used to initialize a new client-side connection. -func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { - if config == nil { - config = DefaultConfig() - } - - if err := VerifyConfig(config); err != nil { - return nil, err - } - return newSession(config, conn, true), nil -} diff --git a/vendor/github.com/xtaci/smux/mux.jpg b/vendor/github.com/xtaci/smux/mux.jpg deleted file mode 100644 index dde2e11..0000000 Binary files a/vendor/github.com/xtaci/smux/mux.jpg and /dev/null differ diff --git a/vendor/github.com/xtaci/smux/session.go b/vendor/github.com/xtaci/smux/session.go deleted file mode 100644 index 12fc4cb..0000000 --- a/vendor/github.com/xtaci/smux/session.go +++ /dev/null @@ -1,353 +0,0 @@ -package smux - -import ( - "encoding/binary" - "io" - "sync" - "sync/atomic" - "time" - - "github.com/pkg/errors" -) - -const ( - defaultAcceptBacklog = 1024 -) - -const ( - errBrokenPipe = "broken pipe" - errInvalidProtocol = "invalid protocol version" - errGoAway = "stream id overflows, should start a new connection" -) - -type writeRequest struct { - frame Frame - result chan writeResult -} - -type writeResult struct { - n int - err error -} - -// Session defines a multiplexed connection for streams -type Session struct { - conn io.ReadWriteCloser - - config *Config - nextStreamID uint32 // next stream identifier - nextStreamIDLock sync.Mutex - - bucket int32 // token bucket - bucketNotify chan struct{} // used for waiting for tokens - - streams map[uint32]*Stream // all streams in this session - streamLock sync.Mutex // locks streams - - die chan struct{} // flag session has died - dieLock sync.Mutex - chAccepts chan *Stream - - dataReady int32 // flag data has arrived - - goAway int32 // flag id exhausted - - deadline atomic.Value - - writes chan writeRequest -} - -func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { - s := new(Session) - s.die = make(chan struct{}) - s.conn = conn - s.config = config - s.streams = make(map[uint32]*Stream) - s.chAccepts = make(chan *Stream, defaultAcceptBacklog) - s.bucket = int32(config.MaxReceiveBuffer) - s.bucketNotify = make(chan struct{}, 1) - s.writes = make(chan writeRequest) - - if client { - s.nextStreamID = 1 - } else { - s.nextStreamID = 0 - } - go s.recvLoop() - go s.sendLoop() - go s.keepalive() - return s -} - -// OpenStream is used to create a new stream -func (s *Session) OpenStream() (*Stream, error) { - if s.IsClosed() { - return nil, errors.New(errBrokenPipe) - } - - // generate stream id - s.nextStreamIDLock.Lock() - if s.goAway > 0 { - s.nextStreamIDLock.Unlock() - return nil, errors.New(errGoAway) - } - - s.nextStreamID += 2 - sid := s.nextStreamID - if sid == sid%2 { // stream-id overflows - s.goAway = 1 - 
s.nextStreamIDLock.Unlock() - return nil, errors.New(errGoAway) - } - s.nextStreamIDLock.Unlock() - - stream := newStream(sid, s.config.MaxFrameSize, s) - - if _, err := s.writeFrame(newFrame(cmdSYN, sid)); err != nil { - return nil, errors.Wrap(err, "writeFrame") - } - - s.streamLock.Lock() - s.streams[sid] = stream - s.streamLock.Unlock() - return stream, nil -} - -// AcceptStream is used to block until the next available stream -// is ready to be accepted. -func (s *Session) AcceptStream() (*Stream, error) { - var deadline <-chan time.Time - if d, ok := s.deadline.Load().(time.Time); ok && !d.IsZero() { - timer := time.NewTimer(d.Sub(time.Now())) - defer timer.Stop() - deadline = timer.C - } - select { - case stream := <-s.chAccepts: - return stream, nil - case <-deadline: - return nil, errTimeout - case <-s.die: - return nil, errors.New(errBrokenPipe) - } -} - -// Close is used to close the session and all streams. -func (s *Session) Close() (err error) { - s.dieLock.Lock() - - select { - case <-s.die: - s.dieLock.Unlock() - return errors.New(errBrokenPipe) - default: - close(s.die) - s.dieLock.Unlock() - s.streamLock.Lock() - for k := range s.streams { - s.streams[k].sessionClose() - } - s.streamLock.Unlock() - s.notifyBucket() - return s.conn.Close() - } -} - -// notifyBucket notifies recvLoop that bucket is available -func (s *Session) notifyBucket() { - select { - case s.bucketNotify <- struct{}{}: - default: - } -} - -// IsClosed does a safe check to see if we have shutdown -func (s *Session) IsClosed() bool { - select { - case <-s.die: - return true - default: - return false - } -} - -// NumStreams returns the number of currently open streams -func (s *Session) NumStreams() int { - if s.IsClosed() { - return 0 - } - s.streamLock.Lock() - defer s.streamLock.Unlock() - return len(s.streams) -} - -// SetDeadline sets a deadline used by Accept* calls. -// A zero time value disables the deadline. 
-func (s *Session) SetDeadline(t time.Time) error { - s.deadline.Store(t) - return nil -} - -// notify the session that a stream has closed -func (s *Session) streamClosed(sid uint32) { - s.streamLock.Lock() - if n := s.streams[sid].recycleTokens(); n > 0 { // return remaining tokens to the bucket - if atomic.AddInt32(&s.bucket, int32(n)) > 0 { - s.notifyBucket() - } - } - delete(s.streams, sid) - s.streamLock.Unlock() -} - -// returnTokens is called by stream to return token after read -func (s *Session) returnTokens(n int) { - if atomic.AddInt32(&s.bucket, int32(n)) > 0 { - s.notifyBucket() - } -} - -// session read a frame from underlying connection -// it's data is pointed to the input buffer -func (s *Session) readFrame(buffer []byte) (f Frame, err error) { - if _, err := io.ReadFull(s.conn, buffer[:headerSize]); err != nil { - return f, errors.Wrap(err, "readFrame") - } - - dec := rawHeader(buffer) - if dec.Version() != version { - return f, errors.New(errInvalidProtocol) - } - - f.ver = dec.Version() - f.cmd = dec.Cmd() - f.sid = dec.StreamID() - if length := dec.Length(); length > 0 { - if _, err := io.ReadFull(s.conn, buffer[headerSize:headerSize+length]); err != nil { - return f, errors.Wrap(err, "readFrame") - } - f.data = buffer[headerSize : headerSize+length] - } - return f, nil -} - -// recvLoop keeps on reading from underlying connection if tokens are available -func (s *Session) recvLoop() { - buffer := make([]byte, (1<<16)+headerSize) - for { - for atomic.LoadInt32(&s.bucket) <= 0 && !s.IsClosed() { - <-s.bucketNotify - } - - if f, err := s.readFrame(buffer); err == nil { - atomic.StoreInt32(&s.dataReady, 1) - - switch f.cmd { - case cmdNOP: - case cmdSYN: - s.streamLock.Lock() - if _, ok := s.streams[f.sid]; !ok { - stream := newStream(f.sid, s.config.MaxFrameSize, s) - s.streams[f.sid] = stream - select { - case s.chAccepts <- stream: - case <-s.die: - } - } - s.streamLock.Unlock() - case cmdFIN: - s.streamLock.Lock() - if stream, ok := s.streams[f.sid]; ok { - stream.markRST() - stream.notifyReadEvent() - } - s.streamLock.Unlock() - case cmdPSH: - s.streamLock.Lock() - if stream, ok := s.streams[f.sid]; ok { - atomic.AddInt32(&s.bucket, -int32(len(f.data))) - stream.pushBytes(f.data) - stream.notifyReadEvent() - } - s.streamLock.Unlock() - default: - s.Close() - return - } - } else { - s.Close() - return - } - } -} - -func (s *Session) keepalive() { - tickerPing := time.NewTicker(s.config.KeepAliveInterval) - tickerTimeout := time.NewTicker(s.config.KeepAliveTimeout) - defer tickerPing.Stop() - defer tickerTimeout.Stop() - for { - select { - case <-tickerPing.C: - s.writeFrame(newFrame(cmdNOP, 0)) - s.notifyBucket() // force a signal to the recvLoop - case <-tickerTimeout.C: - if !atomic.CompareAndSwapInt32(&s.dataReady, 1, 0) { - s.Close() - return - } - case <-s.die: - return - } - } -} - -func (s *Session) sendLoop() { - buf := make([]byte, (1<<16)+headerSize) - for { - select { - case <-s.die: - return - case request, ok := <-s.writes: - if !ok { - continue - } - buf[0] = request.frame.ver - buf[1] = request.frame.cmd - binary.LittleEndian.PutUint16(buf[2:], uint16(len(request.frame.data))) - binary.LittleEndian.PutUint32(buf[4:], request.frame.sid) - copy(buf[headerSize:], request.frame.data) - n, err := s.conn.Write(buf[:headerSize+len(request.frame.data)]) - - n -= headerSize - if n < 0 { - n = 0 - } - - result := writeResult{ - n: n, - err: err, - } - - request.result <- result - close(request.result) - } - } -} - -// writeFrame writes the frame to the 
underlying connection -// and returns the number of bytes written if successful -func (s *Session) writeFrame(f Frame) (n int, err error) { - req := writeRequest{ - frame: f, - result: make(chan writeResult, 1), - } - select { - case <-s.die: - return 0, errors.New(errBrokenPipe) - case s.writes <- req: - } - - result := <-req.result - return result.n, result.err -} diff --git a/vendor/github.com/xtaci/smux/smux.png b/vendor/github.com/xtaci/smux/smux.png deleted file mode 100644 index 26aba3b..0000000 Binary files a/vendor/github.com/xtaci/smux/smux.png and /dev/null differ diff --git a/vendor/github.com/xtaci/smux/stream.go b/vendor/github.com/xtaci/smux/stream.go deleted file mode 100644 index 8b8a52a..0000000 --- a/vendor/github.com/xtaci/smux/stream.go +++ /dev/null @@ -1,261 +0,0 @@ -package smux - -import ( - "bytes" - "io" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/pkg/errors" -) - -// Stream implements net.Conn -type Stream struct { - id uint32 - rstflag int32 - sess *Session - buffer bytes.Buffer - bufferLock sync.Mutex - frameSize int - chReadEvent chan struct{} // notify a read event - die chan struct{} // flag the stream has closed - dieLock sync.Mutex - readDeadline atomic.Value - writeDeadline atomic.Value -} - -// newStream initiates a Stream struct -func newStream(id uint32, frameSize int, sess *Session) *Stream { - s := new(Stream) - s.id = id - s.chReadEvent = make(chan struct{}, 1) - s.frameSize = frameSize - s.sess = sess - s.die = make(chan struct{}) - return s -} - -// ID returns the unique stream ID. -func (s *Stream) ID() uint32 { - return s.id -} - -// Read implements net.Conn -func (s *Stream) Read(b []byte) (n int, err error) { - var deadline <-chan time.Time - if d, ok := s.readDeadline.Load().(time.Time); ok && !d.IsZero() { - timer := time.NewTimer(d.Sub(time.Now())) - defer timer.Stop() - deadline = timer.C - } - -READ: - select { - case <-s.die: - return 0, errors.New(errBrokenPipe) - case <-deadline: - return n, errTimeout - default: - } - - s.bufferLock.Lock() - n, err = s.buffer.Read(b) - s.bufferLock.Unlock() - - if n > 0 { - s.sess.returnTokens(n) - return n, nil - } else if atomic.LoadInt32(&s.rstflag) == 1 { - _ = s.Close() - return 0, io.EOF - } - - select { - case <-s.chReadEvent: - goto READ - case <-deadline: - return n, errTimeout - case <-s.die: - return 0, errors.New(errBrokenPipe) - } -} - -// Write implements net.Conn -func (s *Stream) Write(b []byte) (n int, err error) { - var deadline <-chan time.Time - if d, ok := s.writeDeadline.Load().(time.Time); ok && !d.IsZero() { - timer := time.NewTimer(d.Sub(time.Now())) - defer timer.Stop() - deadline = timer.C - } - - select { - case <-s.die: - return 0, errors.New(errBrokenPipe) - default: - } - - frames := s.split(b, cmdPSH, s.id) - sent := 0 - for k := range frames { - req := writeRequest{ - frame: frames[k], - result: make(chan writeResult, 1), - } - - select { - case s.sess.writes <- req: - case <-s.die: - return sent, errors.New(errBrokenPipe) - case <-deadline: - return sent, errTimeout - } - - select { - case result := <-req.result: - sent += result.n - if result.err != nil { - return sent, result.err - } - case <-s.die: - return sent, errors.New(errBrokenPipe) - case <-deadline: - return sent, errTimeout - } - } - return sent, nil -} - -// Close implements net.Conn -func (s *Stream) Close() error { - s.dieLock.Lock() - - select { - case <-s.die: - s.dieLock.Unlock() - return errors.New(errBrokenPipe) - default: - close(s.die) - s.dieLock.Unlock() - 
s.sess.streamClosed(s.id) - _, err := s.sess.writeFrame(newFrame(cmdFIN, s.id)) - return err - } -} - -// SetReadDeadline sets the read deadline as defined by -// net.Conn.SetReadDeadline. -// A zero time value disables the deadline. -func (s *Stream) SetReadDeadline(t time.Time) error { - s.readDeadline.Store(t) - return nil -} - -// SetWriteDeadline sets the write deadline as defined by -// net.Conn.SetWriteDeadline. -// A zero time value disables the deadline. -func (s *Stream) SetWriteDeadline(t time.Time) error { - s.writeDeadline.Store(t) - return nil -} - -// SetDeadline sets both read and write deadlines as defined by -// net.Conn.SetDeadline. -// A zero time value disables the deadlines. -func (s *Stream) SetDeadline(t time.Time) error { - if err := s.SetReadDeadline(t); err != nil { - return err - } - if err := s.SetWriteDeadline(t); err != nil { - return err - } - return nil -} - -// session closes the stream -func (s *Stream) sessionClose() { - s.dieLock.Lock() - defer s.dieLock.Unlock() - - select { - case <-s.die: - default: - close(s.die) - } -} - -// LocalAddr satisfies net.Conn interface -func (s *Stream) LocalAddr() net.Addr { - if ts, ok := s.sess.conn.(interface { - LocalAddr() net.Addr - }); ok { - return ts.LocalAddr() - } - return nil -} - -// RemoteAddr satisfies net.Conn interface -func (s *Stream) RemoteAddr() net.Addr { - if ts, ok := s.sess.conn.(interface { - RemoteAddr() net.Addr - }); ok { - return ts.RemoteAddr() - } - return nil -} - -// pushBytes a slice into buffer -func (s *Stream) pushBytes(p []byte) { - s.bufferLock.Lock() - s.buffer.Write(p) - s.bufferLock.Unlock() -} - -// recycleTokens transform remaining bytes to tokens(will truncate buffer) -func (s *Stream) recycleTokens() (n int) { - s.bufferLock.Lock() - n = s.buffer.Len() - s.buffer.Reset() - s.bufferLock.Unlock() - return -} - -// split large byte buffer into smaller frames, reference only -func (s *Stream) split(bts []byte, cmd byte, sid uint32) []Frame { - frames := make([]Frame, 0, len(bts)/s.frameSize+1) - for len(bts) > s.frameSize { - frame := newFrame(cmd, sid) - frame.data = bts[:s.frameSize] - bts = bts[s.frameSize:] - frames = append(frames, frame) - } - if len(bts) > 0 { - frame := newFrame(cmd, sid) - frame.data = bts - frames = append(frames, frame) - } - return frames -} - -// notify read event -func (s *Stream) notifyReadEvent() { - select { - case s.chReadEvent <- struct{}{}: - default: - } -} - -// mark this stream has been reset -func (s *Stream) markRST() { - atomic.StoreInt32(&s.rstflag, 1) -} - -var errTimeout error = &timeoutError{} - -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. 
-# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. 
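The smux wire format deleted above (the README's `VERSION(1B) | CMD(1B) | LENGTH(2B) | STREAMID(4B) | DATA(LENGTH)` spec, implemented in frame.go and sendLoop) can be illustrated with a small standalone sketch. The helper name `encodeHeader` is made up for the example; only the field order, the 8-byte header size, and the little-endian encoding follow the deleted code:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeHeader packs an smux-style frame:
// VERSION(1B) | CMD(1B) | LENGTH(2B, little-endian) | STREAMID(4B, little-endian) | DATA.
func encodeHeader(ver, cmd byte, sid uint32, payload []byte) []byte {
	buf := make([]byte, 8+len(payload))
	buf[0] = ver
	buf[1] = cmd
	binary.LittleEndian.PutUint16(buf[2:], uint16(len(payload)))
	binary.LittleEndian.PutUint32(buf[4:], sid)
	copy(buf[8:], payload)
	return buf
}

func main() {
	const cmdPSH byte = 2 // same ordinal as in the deleted frame.go (SYN=0, FIN=1, PSH=2, NOP=3)
	pkt := encodeHeader(1, cmdPSH, 3, []byte("ping"))

	// Decode the header fields back, mirroring rawHeader's accessors.
	fmt.Println("version:  ", pkt[0])
	fmt.Println("cmd:      ", pkt[1])
	fmt.Println("length:   ", binary.LittleEndian.Uint16(pkt[2:]))
	fmt.Println("stream id:", binary.LittleEndian.Uint32(pkt[4:]))
}
```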
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index c02b4d5..0000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/web/frps/src/App.vue b/web/frps/src/App.vue index 0acb42d..755beae 100644 --- a/web/frps/src/App.vue +++ b/web/frps/src/App.vue @@ -44,7 +44,7 @@ methods: { handleSelect(key, path) { if (key == '') { - window.open("https://github.com/KunTengRom/xfrps") + window.open("https://github.com/liudf0716/xfrps") } } }
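The pbkdf2 package dropped from vendor above exposes a single `Key` function. A minimal sketch of deriving a 32-byte (AES-256-sized) key with HMAC-SHA-256 as the PRF follows; the hard-coded salt is purely for illustration, and real code should generate at least 8 random bytes with crypto/rand:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	password := []byte("some password")
	salt := []byte("illustrative-salt") // placeholder; use crypto/rand in real code

	// 4096 iterations, 32-byte output, HMAC-SHA-256 as the pseudorandom function.
	dk := pbkdf2.Key(password, salt, 4096, 32, sha256.New)

	fmt.Println(hex.EncodeToString(dk))
}
```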