temporarily remove vendor directory

Ian Kent 2017-04-17 00:13:30 +01:00
parent 82292eb102
commit 43bcb0a000
No known key found for this signature in database
GPG key ID: CE5AC689AF520A48
229 changed files with 0 additions and 38984 deletions

@@ -1,27 +0,0 @@
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,10 +0,0 @@
context
=======
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
gorilla/context is a general purpose registry for global request variables.
> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http in Go 1.7) performs. You should either use *just* gorilla/context, or, moving forward, the new `http.Request.Context()`.
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
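A minimal usage sketch, assuming the package API shown in the source below; the handler, key type, and port are illustrative, not part of the package:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/context"
)

// Use an unexported key type to avoid collisions with other packages.
type key int

const userKey key = 0

func handler(w http.ResponseWriter, r *http.Request) {
	// Values are keyed by the *http.Request, so anything stored earlier in the
	// request lifetime (for example by a wrapping handler) can be read back here.
	context.Set(r, userKey, "gorilla")
	if v, ok := context.GetOk(r, userKey); ok {
		w.Write([]byte(v.(string)))
	}
}

func main() {
	// ClearHandler removes the stored values when the request ends, so the
	// package-level map does not grow without bound.
	h := context.ClearHandler(http.HandlerFunc(handler))
	log.Fatal(http.ListenAndServe(":8000", h))
}
```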

@@ -1,143 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context
import (
"net/http"
"sync"
"time"
)
var (
mutex sync.RWMutex
data = make(map[*http.Request]map[interface{}]interface{})
datat = make(map[*http.Request]int64)
)
// Set stores a value for a given key in a given request.
func Set(r *http.Request, key, val interface{}) {
mutex.Lock()
if data[r] == nil {
data[r] = make(map[interface{}]interface{})
datat[r] = time.Now().Unix()
}
data[r][key] = val
mutex.Unlock()
}
// Get returns a value stored for a given key in a given request.
func Get(r *http.Request, key interface{}) interface{} {
mutex.RLock()
if ctx := data[r]; ctx != nil {
value := ctx[key]
mutex.RUnlock()
return value
}
mutex.RUnlock()
return nil
}
// GetOk returns the stored value and its presence state, like the two-value form of a map access.
func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
mutex.RLock()
if _, ok := data[r]; ok {
value, ok := data[r][key]
mutex.RUnlock()
return value, ok
}
mutex.RUnlock()
return nil, false
}
// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
func GetAll(r *http.Request) map[interface{}]interface{} {
mutex.RLock()
if context, ok := data[r]; ok {
result := make(map[interface{}]interface{}, len(context))
for k, v := range context {
result[k] = v
}
mutex.RUnlock()
return result
}
mutex.RUnlock()
return nil
}
// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
// the request was registered.
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
mutex.RLock()
context, ok := data[r]
result := make(map[interface{}]interface{}, len(context))
for k, v := range context {
result[k] = v
}
mutex.RUnlock()
return result, ok
}
// Delete removes a value stored for a given key in a given request.
func Delete(r *http.Request, key interface{}) {
mutex.Lock()
if data[r] != nil {
delete(data[r], key)
}
mutex.Unlock()
}
// Clear removes all values stored for a given request.
//
// This is usually called by a handler wrapper to clean up request
// variables at the end of a request lifetime. See ClearHandler().
func Clear(r *http.Request) {
mutex.Lock()
clear(r)
mutex.Unlock()
}
// clear is Clear without the lock.
func clear(r *http.Request) {
delete(data, r)
delete(datat, r)
}
// Purge removes request data stored for longer than maxAge, in seconds.
// It returns the number of requests removed.
//
// If maxAge <= 0, all request data is removed.
//
// This is only used as a sanity check: if context cleaning was not properly
// set up, some request data can be kept forever, consuming an increasing
// amount of memory. If this is detected, Purge() must be called periodically
// until the problem is fixed.
func Purge(maxAge int) int {
mutex.Lock()
count := 0
if maxAge <= 0 {
count = len(data)
data = make(map[*http.Request]map[interface{}]interface{})
datat = make(map[*http.Request]int64)
} else {
min := time.Now().Unix() - int64(maxAge)
for r := range data {
if datat[r] < min {
clear(r)
count++
}
}
}
mutex.Unlock()
return count
}
// ClearHandler wraps an http.Handler and clears request values at the end
// of a request lifetime.
func ClearHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer Clear(r)
h.ServeHTTP(w, r)
})
}

@@ -1,88 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package context stores values shared during a request lifetime.
Note: gorilla/context, having been born well before `context.Context` existed,
does not play well with the shallow copying of the request that
[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
(added to net/http in Go 1.7) performs. You should either use *just*
gorilla/context, or, moving forward, the new `http.Request.Context()`.
For example, a router can set variables extracted from the URL and later
application handlers can access those values, or it can be used to store
session values to be saved at the end of a request. There are several
other common uses.
The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
Here's the basic usage: first define the keys that you will need. The key
type is interface{} so a key can be of any type that supports equality.
Here we define a key using a custom int type to avoid name collisions:
package foo
import (
"github.com/gorilla/context"
)
type key int
const MyKey key = 0
Then set a variable. Variables are bound to an http.Request object, so you
need a request instance to set a value:
context.Set(r, MyKey, "bar")
The application can later access the variable using the same key you provided:
func MyHandler(w http.ResponseWriter, r *http.Request) {
// val is "bar".
val := context.Get(r, foo.MyKey)
// returns ("bar", true)
val, ok := context.GetOk(r, foo.MyKey)
// ...
}
And that's all about the basic usage. We discuss some other ideas below.
Any type can be stored in the context. To enforce a given type, make the key
private and wrap Get() and Set() to accept and return values of a specific
type:
type key int
const mykey key = 0
// GetMyKey returns a value for this package from the request values.
func GetMyKey(r *http.Request) SomeType {
if rv := context.Get(r, mykey); rv != nil {
return rv.(SomeType)
}
return nil
}
// SetMyKey sets a value for this package in the request values.
func SetMyKey(r *http.Request, val SomeType) {
context.Set(r, mykey, val)
}
Variables must be cleared at the end of a request, to remove all values
that were stored. This can be done in an http.Handler, after a request was
served. Just call Clear() passing the request:
context.Clear(r)
...or use ClearHandler(), which conveniently wraps an http.Handler to clear
variables at the end of a request lifetime.
The Routers from the packages gorilla/mux and gorilla/pat call Clear()
so if you are using either of them you don't need to clear the context manually.
*/
package context

@@ -1,27 +0,0 @@
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,340 +0,0 @@
gorilla/mux
===
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
http://www.gorillatoolkit.org/pkg/mux
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler.
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
* URL hosts and paths can have variables with an optional regular expression.
* Registered URLs can be built, or "reversed", which helps maintain references to resources.
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
---
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
* [Listing Routes](#listing-routes)
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
* [Full Example](#full-example)
---
## Install
With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
```sh
go get -u github.com/gorilla/mux
```
## Examples
Let's start registering a couple of URL paths and handlers:
```go
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
```
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
```go
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Category: %v\n", vars["category"])
}
```
And this is all you need to know about the basic usage. More advanced options are explained below.
### Matching Routes
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
```go
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
```
There are several other matchers that can be added. To match path prefixes:
```go
r.PathPrefix("/products/")
```
...or HTTP methods:
```go
r.Methods("GET", "POST")
```
...or URL schemes:
```go
r.Schemes("https")
```
...or header values:
```go
r.Headers("X-Requested-With", "XMLHttpRequest")
```
...or query values:
```go
r.Queries("key", "value")
```
...or to use a custom matcher function:
```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
```
...and finally, it is possible to combine several matchers in a single route:
```go
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
```
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
```go
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
```
Then register routes in the subrouter:
```go
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then different parts of the app can register their paths relative to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
```go
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
### Listing Routes
Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
```go
package main
import (
"fmt"
"net/http"
"github.com/gorilla/mux"
)
func handler(w http.ResponseWriter, r *http.Request) {
return
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.HandleFunc("/products", handler)
r.HandleFunc("/articles", handler)
r.HandleFunc("/articles/{id}", handler)
r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
t, err := route.GetPathTemplate()
if err != nil {
return err
}
fmt.Println(t)
return nil
})
http.Handle("/", r)
}
```
### Static Files
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
request that matches "/static/*". This makes it easy to serve static files with mux:
```go
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
```
### Registered URLs
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
```
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
```go
url, err := r.Get("article").URL("category", "technology", "id", "42")
```
...and the result will be a `url.URL` with the following path:
```
"/articles/technology/42"
```
This also works for host variables:
```go
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
```
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
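Build-only routes are mentioned above but not demonstrated; here is a minimal sketch, in the same fragment style as the other examples, where the host, path, and route name are invented for illustration:

```go
r := mux.NewRouter()
// A "build-only" route: it never matches incoming requests, but it can
// still be reversed with r.Get("download").URL(...).
r.Host("files.example.com").
	Path("/downloads/{file}").
	Name("download").
	BuildOnly()

// u.String() will be "http://files.example.com/downloads/report.pdf"
u, err := r.Get("download").URL("file", "report.pdf")
```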
Regex support also exists for matching Headers within a route. For example, we could do:
```go
r.HeadersRegexp("Content-Type", "application/(text|json)")
```
...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
```go
// "http://news.domain.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
```
And if you use subrouters, host and path defined separately can be built as well:
```go
r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
```
## Full Example
Here's a complete, runnable example of a small `mux` based server:
```go
package main
import (
"net/http"
"log"
"github.com/gorilla/mux"
)
func YourHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Gorilla!\n"))
}
func main() {
r := mux.NewRouter()
// Routes consist of a path and a handler function.
r.HandleFunc("/", YourHandler)
// Bind to a port and pass our router in
log.Fatal(http.ListenAndServe(":8000", r))
}
```
## License
BSD licensed. See the LICENSE file for details.

@@ -1,26 +0,0 @@
// +build !go1.7
package mux
import (
"net/http"
"github.com/gorilla/context"
)
func contextGet(r *http.Request, key interface{}) interface{} {
return context.Get(r, key)
}
func contextSet(r *http.Request, key, val interface{}) *http.Request {
if val == nil {
return r
}
context.Set(r, key, val)
return r
}
func contextClear(r *http.Request) {
context.Clear(r)
}

@@ -1,24 +0,0 @@
// +build go1.7
package mux
import (
"context"
"net/http"
)
func contextGet(r *http.Request, key interface{}) interface{} {
return r.Context().Value(key)
}
func contextSet(r *http.Request, key, val interface{}) *http.Request {
if val == nil {
return r
}
return r.WithContext(context.WithValue(r.Context(), key, val))
}
func contextClear(r *http.Request) {
return
}

vendor/github.com/gorilla/mux/doc.go (generated, vendored)
@@ -1,240 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package mux implements a request router and dispatcher.
The name mux stands for "HTTP request multiplexer". Like the standard
http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
* URL hosts and paths can have variables with an optional regular
expression.
* Registered URLs can be built, or "reversed", which helps maintain
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
* It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.
Let's start registering a couple of URL paths and handlers:
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
Here we register three routes mapping URL paths to handlers. This is
equivalent to how http.HandleFunc() works: if an incoming request URL matches
one of the paths, the corresponding handler is called passing
(http.ResponseWriter, *http.Request) as parameters.
Paths can have variables. They are defined using the format {name} or
{name:pattern}. If a regular expression pattern is not defined, the matched
variable will be anything until the next slash. For example:
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
The names are used to create a map of route variables which can be retrieved
calling mux.Vars():
vars := mux.Vars(request)
category := vars["category"]
Note that if any capturing groups are present, mux will panic() during parsing. To prevent
this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
when capturing groups were present.
And this is all you need to know about the basic usage. More advanced options
are explained below.
Routes can also be restricted to a domain or subdomain. Just define a host
pattern to be matched. They can also have variables:
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
There are several other matchers that can be added. To match path prefixes:
r.PathPrefix("/products/")
...or HTTP methods:
r.Methods("GET", "POST")
...or URL schemes:
r.Schemes("https")
...or header values:
r.Headers("X-Requested-With", "XMLHttpRequest")
...or query values:
r.Queries("key", "value")
...or to use a custom matcher function:
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
...and finally, it is possible to combine several matchers in a single route:
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
Setting the same matching conditions again and again can be boring, so we have
a way to group several routes that share the same requirements.
We call it "subrouting".
For example, let's say we have several URLs that should only match when the
host is "www.example.com". Create a route for that host and get a "subrouter"
from it:
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
Then register routes in the subrouter:
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
The three URL paths we registered above will only be tested if the domain is
"www.example.com", because the subrouter is tested first. This is not
only convenient, but also optimizes request matching. You can create
subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define
subrouters in a central place and then different parts of the app can register
their paths relative to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix,
the inner routes use it as base for their paths:
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
Note that the path provided to PathPrefix() represents a "wildcard": calling
PathPrefix("/static/").Handler(...) means that the handler will be passed any
request that matches "/static/*". This makes it easy to serve static files with mux:
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built,
or "reversed". We define a name calling Name() on a route. For example:
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
To build a URL, get the route and call the URL() method, passing a sequence of
key/value pairs for the route variables. For the previous route, we would do:
url, err := r.Get("article").URL("category", "technology", "id", "42")
...and the result will be a url.URL with the following path:
"/articles/technology/42"
This also works for host variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
generated URL will always match a registered route -- the only exception is
for explicitly defined "build-only" routes which never match.
Regex support also exists for matching Headers within a route. For example, we could do:
r.HeadersRegexp("Content-Type", "application/(text|json)")
...and the route will match requests with a Content-Type of either `application/json` or
`application/text`.
There's also a way to build only the URL host or path for a route:
use the methods URLHost() or URLPath() instead. For the previous route,
we would do:
// "http://news.domain.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
And if you use subrouters, host and path defined separately can be built
as well:
r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
*/
package mux

vendor/github.com/gorilla/mux/mux.go (generated, vendored)
@@ -1,542 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"path"
"regexp"
"strings"
)
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
}
// Router registers routes to be matched and dispatches a handler.
//
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
// var router = mux.NewRouter()
//
// func main() {
// http.Handle("/", router)
// }
//
// Or, for Google App Engine, register it in an init() function:
//
// func init() {
// http.Handle("/", router)
// }
//
// This will send all incoming requests to the router.
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
// Parent route, if this is a subrouter.
parent parentRoute
// Routes to be matched, in order.
routes []*Route
// Routes by name for URL building.
namedRoutes map[string]*Route
// See Router.StrictSlash(). This defines the flag for new routes.
strictSlash bool
// See Router.SkipClean(). This defines the flag for new routes.
skipClean bool
// If true, do not clear the request context after handling the request.
// This has no effect when go1.7+ is used, since the context is stored
// on the request itself.
KeepContext bool
// see Router.UseEncodedPath(). This defines a flag for all routes.
useEncodedPath bool
}
// Match matches registered routes against the request.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
return true
}
}
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
return true
}
return false
}
// ServeHTTP dispatches the handler registered in the matched route.
//
// When there is a match, the route variables can be retrieved calling
// mux.Vars(request).
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.skipClean {
path := req.URL.Path
if r.useEncodedPath {
path = getPath(req)
}
// Clean path to canonical form and redirect.
if p := cleanPath(path); p != path {
// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
// http://code.google.com/p/go/issues/detail?id=5252
url := *req.URL
url.Path = p
p = url.String()
w.Header().Set("Location", p)
w.WriteHeader(http.StatusMovedPermanently)
return
}
}
var match RouteMatch
var handler http.Handler
if r.Match(req, &match) {
handler = match.Handler
req = setVars(req, match.Vars)
req = setCurrentRoute(req, match.Route)
}
if handler == nil {
handler = http.NotFoundHandler()
}
if !r.KeepContext {
defer contextClear(req)
}
handler.ServeHTTP(w, req)
}
// Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route {
return r.getNamedRoutes()[name]
}
// GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route {
return r.getNamedRoutes()[name]
}
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
// When true, if the route path is "/path/", accessing "/path" will redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
// route inherit the original StrictSlash setting.
func (r *Router) StrictSlash(value bool) *Router {
r.strictSlash = value
return r
}
// SkipClean defines the path cleaning behaviour for new routes. The initial
// value is false. Users should be careful about which routes are not cleaned.
//
// When true, if the route path is "/path//to", it will remain with the double
// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
//
// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
// become /fetch/http/xkcd.com/534
func (r *Router) SkipClean(value bool) *Router {
r.skipClean = value
return r
}
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
// This behavior has the drawback of needing to match routes against
// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
// to r.URL.Path will not affect routing when this flag is on and thus may
// induce unintended behavior.
//
// If not called, the router will match the unencoded path to the routes.
// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
func (r *Router) UseEncodedPath() *Router {
r.useEncodedPath = true
return r
}
// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------
// getNamedRoutes returns the map where named routes are registered.
func (r *Router) getNamedRoutes() map[string]*Route {
if r.namedRoutes == nil {
if r.parent != nil {
r.namedRoutes = r.parent.getNamedRoutes()
} else {
r.namedRoutes = make(map[string]*Route)
}
}
return r.namedRoutes
}
// getRegexpGroup returns regexp definitions from the parent route, if any.
func (r *Router) getRegexpGroup() *routeRegexpGroup {
if r.parent != nil {
return r.parent.getRegexpGroup()
}
return nil
}
func (r *Router) buildVars(m map[string]string) map[string]string {
if r.parent != nil {
m = r.parent.buildVars(m)
}
return m
}
// ----------------------------------------------------------------------------
// Route factories
// ----------------------------------------------------------------------------
// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
r.routes = append(r.routes, route)
return route
}
// Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route {
return r.NewRoute().Path(path).Handler(handler)
}
// HandleFunc registers a new route with a matcher for the URL path.
// See Route.Path() and Route.HandlerFunc().
func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
*http.Request)) *Route {
return r.NewRoute().Path(path).HandlerFunc(f)
}
// Headers registers a new route with a matcher for request header values.
// See Route.Headers().
func (r *Router) Headers(pairs ...string) *Route {
return r.NewRoute().Headers(pairs...)
}
// Host registers a new route with a matcher for the URL host.
// See Route.Host().
func (r *Router) Host(tpl string) *Route {
return r.NewRoute().Host(tpl)
}
// MatcherFunc registers a new route with a custom matcher function.
// See Route.MatcherFunc().
func (r *Router) MatcherFunc(f MatcherFunc) *Route {
return r.NewRoute().MatcherFunc(f)
}
// Methods registers a new route with a matcher for HTTP methods.
// See Route.Methods().
func (r *Router) Methods(methods ...string) *Route {
return r.NewRoute().Methods(methods...)
}
// Path registers a new route with a matcher for the URL path.
// See Route.Path().
func (r *Router) Path(tpl string) *Route {
return r.NewRoute().Path(tpl)
}
// PathPrefix registers a new route with a matcher for the URL path prefix.
// See Route.PathPrefix().
func (r *Router) PathPrefix(tpl string) *Route {
return r.NewRoute().PathPrefix(tpl)
}
// Queries registers a new route with a matcher for URL query values.
// See Route.Queries().
func (r *Router) Queries(pairs ...string) *Route {
return r.NewRoute().Queries(pairs...)
}
// Schemes registers a new route with a matcher for URL schemes.
// See Route.Schemes().
func (r *Router) Schemes(schemes ...string) *Route {
return r.NewRoute().Schemes(schemes...)
}
// BuildVarsFunc registers a new route with a custom function for modifying
// route variables before building a URL.
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
return r.NewRoute().BuildVarsFunc(f)
}
// Walk walks the router and all its sub-routers, calling walkFn for each route
// in the tree. The routes are walked in the order they were added. Sub-routers
// are explored depth-first.
func (r *Router) Walk(walkFn WalkFunc) error {
return r.walk(walkFn, []*Route{})
}
// SkipRouter is used as a return value from WalkFuncs to indicate that the
// router that Walk is about to descend into should be skipped.
var SkipRouter = errors.New("skip this router")
// WalkFunc is the type of the function called for each route visited by Walk.
// At every invocation, it is given the current route, the current router,
// and a list of ancestor routes that lead to the current route.
type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
for _, t := range r.routes {
if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
continue
}
err := walkFn(t, r, ancestors)
if err == SkipRouter {
continue
}
if err != nil {
return err
}
for _, sr := range t.matchers {
if h, ok := sr.(*Router); ok {
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
}
}
if h, ok := t.handler.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
return nil
}
// ----------------------------------------------------------------------------
// Context
// ----------------------------------------------------------------------------
// RouteMatch stores information about a matched route.
type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
}
type contextKey int
const (
varsKey contextKey = iota
routeKey
)
// Vars returns the route variables for the current request, if any.
func Vars(r *http.Request) map[string]string {
if rv := contextGet(r, varsKey); rv != nil {
return rv.(map[string]string)
}
return nil
}
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
// after the handler returns, unless the KeepContext option is set on the
// Router.
func CurrentRoute(r *http.Request) *Route {
if rv := contextGet(r, routeKey); rv != nil {
return rv.(*Route)
}
return nil
}
func setVars(r *http.Request, val interface{}) *http.Request {
return contextSet(r, varsKey, val)
}
func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
return contextSet(r, routeKey, val)
}
// ----------------------------------------------------------------------------
// Helpers
// ----------------------------------------------------------------------------
// getPath returns the escaped path if possible, doing what URL.EscapedPath()
// (added in go1.5) does.
func getPath(req *http.Request) string {
if req.RequestURI != "" {
// Extract the path from RequestURI (which is escaped unlike URL.Path)
// as detailed in https://golang.org/pkg/net/url/#URL; this is a
// server-side workaround for Go < 1.5.
// http://localhost/path/here?v=1 -> /path/here
path := req.RequestURI
path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
path = strings.TrimPrefix(path, req.URL.Host)
if i := strings.LastIndex(path, "?"); i > -1 {
path = path[:i]
}
if i := strings.LastIndex(path, "#"); i > -1 {
path = path[:i]
}
return path
}
return req.URL.Path
}
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
// uniqueVars returns an error if two slices contain duplicated strings.
func uniqueVars(s1, s2 []string) error {
for _, v1 := range s1 {
for _, v2 := range s2 {
if v1 == v2 {
return fmt.Errorf("mux: duplicated route variable %q", v2)
}
}
}
return nil
}
// checkPairs returns the count of strings passed in, and an error if
// the count is not an even number.
func checkPairs(pairs ...string) (int, error) {
length := len(pairs)
if length%2 != 0 {
return length, fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
}
return length, nil
}
// mapFromPairsToString converts variadic string parameters to a
// string to string map.
func mapFromPairsToString(pairs ...string) (map[string]string, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]string, length/2)
for i := 0; i < length; i += 2 {
m[pairs[i]] = pairs[i+1]
}
return m, nil
}
// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]*regexp.Regexp, length/2)
for i := 0; i < length; i += 2 {
regex, err := regexp.Compile(pairs[i+1])
if err != nil {
return nil, err
}
m[pairs[i]] = regex
}
return m, nil
}
// matchInArray returns true if the given string value is in the array.
func matchInArray(arr []string, value string) bool {
for _, v := range arr {
if v == value {
return true
}
}
return false
}
// matchMapWithString returns true if the given key/value pairs exist in a given map.
func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != "" {
// If value was defined as an empty string we only check that the
// key exists. Otherwise we also check for equality.
valueExists := false
for _, value := range values {
if v == value {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// matchMapWithRegex returns true if the given key/value pairs exist in a given map,
// with the values matched against the corresponding regexps.
func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != nil {
// If the regexp is nil we only check that the key exists. Otherwise we
// also check that at least one of the values matches the regexp.
valueExists := false
for _, value := range values {
if v.MatchString(value) {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}

@@ -1,323 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"bytes"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
)
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It will extract named variables, assemble a regexp to be matched, create
// a "reverse" template to build URLs and compile regexps to validate variable
// values used in URL building.
//
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
return nil, errBraces
}
// Backup the original.
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
if matchQuery {
defaultPattern = "[^?&]*"
} else if matchHost {
defaultPattern = "[^.]+"
matchPrefix = false
}
// Only match strict slash if not matching a host, a path prefix or a query.
if matchPrefix || matchHost || matchQuery {
strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
if strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
varsN := make([]string, len(idxs)/2)
varsR := make([]*regexp.Regexp, len(idxs)/2)
pattern := bytes.NewBufferString("")
pattern.WriteByte('^')
reverse := bytes.NewBufferString("")
var end int
var err error
for i := 0; i < len(idxs); i += 2 {
// Set all values we are interested in.
raw := tpl[end:idxs[i]]
end = idxs[i+1]
parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
name := parts[0]
patt := defaultPattern
if len(parts) == 2 {
patt = parts[1]
}
// Name or pattern can't be empty.
if name == "" || patt == "" {
return nil, fmt.Errorf("mux: missing name or pattern in %q",
tpl[idxs[i]:end])
}
// Build the regexp pattern.
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
// Build the reverse template.
fmt.Fprintf(reverse, "%s%%s", raw)
// Append variable name and compiled pattern.
varsN[i/2] = name
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
if err != nil {
return nil, err
}
}
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
if strictSlash {
pattern.WriteString("[/]?")
}
if matchQuery {
// Add the default pattern if the query value is empty
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
pattern.WriteString(defaultPattern)
}
}
if !matchPrefix {
pattern.WriteByte('$')
}
reverse.WriteString(raw)
if endSlash {
reverse.WriteByte('/')
}
// Compile full regexp.
reg, errCompile := regexp.Compile(pattern.String())
if errCompile != nil {
return nil, errCompile
}
// Check for capturing groups which used to work in older versions
if reg.NumSubexp() != len(idxs)/2 {
panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
}
// Done!
return &routeRegexp{
template: template,
matchHost: matchHost,
matchQuery: matchQuery,
strictSlash: strictSlash,
useEncodedPath: useEncodedPath,
regexp: reg,
reverse: reverse.String(),
varsN: varsN,
varsR: varsR,
}, nil
}
// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables.
type routeRegexp struct {
// The unmodified template.
template string
// True for host match, false for path or query string match.
matchHost bool
// True for query string match, false for path and host match.
matchQuery bool
// The strictSlash value defined on the route, but disabled if PathPrefix was used.
strictSlash bool
// Determines whether to use encoded path from getPath function or unencoded
// req.URL.Path for path matching
useEncodedPath bool
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
reverse string
// Variable names.
varsN []string
// Variable regexps (validators).
varsR []*regexp.Regexp
}
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
if !r.matchHost {
if r.matchQuery {
return r.matchQueryString(req)
}
path := req.URL.Path
if r.useEncodedPath {
path = getPath(req)
}
return r.regexp.MatchString(path)
}
return r.regexp.MatchString(getHost(req))
}
// url builds a URL part using the given values.
func (r *routeRegexp) url(values map[string]string) (string, error) {
urlValues := make([]interface{}, len(r.varsN))
for k, v := range r.varsN {
value, ok := values[v]
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
if !r.regexp.MatchString(rv) {
// The URL is checked against the full regexp, instead of checking
// individual variables. This is faster but to provide a good error
// message, we check individual regexps if the URL doesn't match.
for k, v := range r.varsN {
if !r.varsR[k].MatchString(values[v]) {
return "", fmt.Errorf(
"mux: variable %q doesn't match, expected %q", values[v],
r.varsR[k].String())
}
}
}
return rv, nil
}
// getURLQuery returns a single query parameter from a request URL.
// For a URL with foo=bar&baz=ding, we return only the relevant key
// value pair for the routeRegexp.
func (r *routeRegexp) getURLQuery(req *http.Request) string {
if !r.matchQuery {
return ""
}
templateKey := strings.SplitN(r.template, "=", 2)[0]
for key, vals := range req.URL.Query() {
if key == templateKey && len(vals) > 0 {
return key + "=" + vals[0]
}
}
return ""
}
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
return r.regexp.MatchString(r.getURLQuery(req))
}
// braceIndices returns the first level curly brace indices from a string.
// It returns an error in case of unbalanced braces.
func braceIndices(s string) ([]int, error) {
var level, idx int
var idxs []int
for i := 0; i < len(s); i++ {
switch s[i] {
case '{':
if level++; level == 1 {
idx = i
}
case '}':
if level--; level == 0 {
idxs = append(idxs, idx, i+1)
} else if level < 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
}
}
if level != 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
return idxs, nil
}
// varGroupName builds a capturing group name for the indexed variable.
func varGroupName(idx int) string {
return "v" + strconv.Itoa(idx)
}
// ----------------------------------------------------------------------------
// routeRegexpGroup
// ----------------------------------------------------------------------------
// routeRegexpGroup groups the route matchers that carry variables.
type routeRegexpGroup struct {
host *routeRegexp
path *routeRegexp
queries []*routeRegexp
}
// setMatch extracts the variables from the URL once a route matches.
func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
matches := v.host.regexp.FindStringSubmatchIndex(host)
if len(matches) > 0 {
extractVars(host, matches, v.host.varsN, m.Vars)
}
}
path := req.URL.Path
if r.useEncodedPath {
path = getPath(req)
}
// Store path variables.
if v.path != nil {
matches := v.path.regexp.FindStringSubmatchIndex(path)
if len(matches) > 0 {
extractVars(path, matches, v.path.varsN, m.Vars)
// Check if we should redirect.
if v.path.strictSlash {
p1 := strings.HasSuffix(path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
u, _ := url.Parse(req.URL.String())
if p1 {
u.Path = u.Path[:len(u.Path)-1]
} else {
u.Path += "/"
}
m.Handler = http.RedirectHandler(u.String(), 301)
}
}
}
}
// Store query string variables.
for _, q := range v.queries {
queryURL := q.getURLQuery(req)
matches := q.regexp.FindStringSubmatchIndex(queryURL)
if len(matches) > 0 {
extractVars(queryURL, matches, q.varsN, m.Vars)
}
}
}
// getHost tries its best to return the request host.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
host := r.Host
// Slice off any port information.
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
return host
}
func extractVars(input string, matches []int, names []string, output map[string]string) {
for i, name := range names {
output[name] = input[matches[2*i+2]:matches[2*i+3]]
}
}

@@ -1,636 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
)
// Route stores information to match a request and build URLs.
type Route struct {
// Parent where the route was registered (a Router).
parent parentRoute
// Request handler for the route.
handler http.Handler
// List of matchers.
matchers []matcher
// Manager for the variables from host and path.
regexp *routeRegexpGroup
// If true, when the path pattern is "/path/", accessing "/path" will
// redirect to the former and vice versa.
strictSlash bool
// If true, when the path pattern is "/path//to", accessing "/path//to"
// will not redirect
skipClean bool
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
useEncodedPath bool
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
name string
// Error resulting from building a route.
err error
buildVarsFunc BuildVarsFunc
}
func (r *Route) SkipClean() bool {
return r.skipClean
}
// Match matches the route against the request.
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
return false
}
}
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
}
if match.Handler == nil {
match.Handler = r.handler
}
if match.Vars == nil {
match.Vars = make(map[string]string)
}
// Set variables.
if r.regexp != nil {
r.regexp.setMatch(req, match, r)
}
return true
}
// ----------------------------------------------------------------------------
// Route attributes
// ----------------------------------------------------------------------------
// GetError returns an error resulting from building the route, if any.
func (r *Route) GetError() error {
return r.err
}
// BuildOnly sets the route to never match: it is only used to build URLs.
func (r *Route) BuildOnly() *Route {
r.buildOnly = true
return r
}
// Handler --------------------------------------------------------------------
// Handler sets a handler for the route.
func (r *Route) Handler(handler http.Handler) *Route {
if r.err == nil {
r.handler = handler
}
return r
}
// HandlerFunc sets a handler function for the route.
func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
return r.Handler(http.HandlerFunc(f))
}
// GetHandler returns the handler for the route, if any.
func (r *Route) GetHandler() http.Handler {
return r.handler
}
// Name -----------------------------------------------------------------------
// Name sets the name for the route, used to build URLs.
// If the name was registered already it will be overwritten.
func (r *Route) Name(name string) *Route {
if r.name != "" {
r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
r.name, name)
}
if r.err == nil {
r.name = name
r.getNamedRoutes()[name] = r
}
return r
}
// GetName returns the name for the route, if any.
func (r *Route) GetName() string {
return r.name
}
// ----------------------------------------------------------------------------
// Matchers
// ----------------------------------------------------------------------------
// matcher types try to match a request.
type matcher interface {
Match(*http.Request, *RouteMatch) bool
}
// addMatcher adds a matcher to the route.
func (r *Route) addMatcher(m matcher) *Route {
if r.err == nil {
r.matchers = append(r.matchers, m)
}
return r
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
if r.err != nil {
return r.err
}
r.regexp = r.getRegexpGroup()
if !matchHost && !matchQuery {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
if r.regexp.path != nil {
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
if err != nil {
return err
}
for _, q := range r.regexp.queries {
if err = uniqueVars(rr.varsN, q.varsN); err != nil {
return err
}
}
if matchHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
}
}
r.regexp.host = rr
} else {
if r.regexp.host != nil {
if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
return err
}
}
if matchQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
}
}
r.addMatcher(rr)
return nil
}
// Headers --------------------------------------------------------------------
// headerMatcher matches the request against header values.
type headerMatcher map[string]string
func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithString(m, r.Header, true)
}
// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
// r := mux.NewRouter()
// r.Headers("Content-Type", "application/json",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) Headers(pairs ...string) *Route {
if r.err == nil {
var headers map[string]string
headers, r.err = mapFromPairsToString(pairs...)
return r.addMatcher(headerMatcher(headers))
}
return r
}
// headerRegexMatcher matches the request against the route given a regex for the header
type headerRegexMatcher map[string]*regexp.Regexp
func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithRegex(m, r.Header, true)
}
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
// support. For example:
//
// r := mux.NewRouter()
// r.HeadersRegexp("Content-Type", "application/(text|json)",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request headers match their regular expressions.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
if r.err == nil {
var headers map[string]*regexp.Regexp
headers, r.err = mapFromPairsToRegex(pairs...)
return r.addMatcher(headerRegexMatcher(headers))
}
return r
}
// Host -----------------------------------------------------------------------
// Host adds a matcher for the URL host.
// It accepts a template with zero or more URL variables enclosed by {}.
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next dot.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Host("www.example.com")
// r.Host("{subdomain}.domain.com")
// r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, true, false, false)
return r
}
// MatcherFunc ----------------------------------------------------------------
// MatcherFunc is the function signature used by custom matchers.
type MatcherFunc func(*http.Request, *RouteMatch) bool
// Match returns the match for a given request.
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
return m(r, match)
}
// MatcherFunc adds a custom function to be used as a request matcher.
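//
// For example, a brief sketch of a matcher keyed on a request header (the
// "X-Canary" header name is only an illustration):
//
//    r.MatcherFunc(func(req *http.Request, match *RouteMatch) bool {
//        return req.Header.Get("X-Canary") == "true"
//    })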
func (r *Route) MatcherFunc(f MatcherFunc) *Route {
return r.addMatcher(f)
}
// Methods --------------------------------------------------------------------
// methodMatcher matches the request against HTTP methods.
type methodMatcher []string
func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.Method)
}
// Methods adds a matcher for HTTP methods.
// It accepts a sequence of one or more methods to be matched, e.g.:
// "GET", "POST", "PUT".
func (r *Route) Methods(methods ...string) *Route {
for k, v := range methods {
methods[k] = strings.ToUpper(v)
}
return r.addMatcher(methodMatcher(methods))
}
// Path -----------------------------------------------------------------------
// Path adds a matcher for the URL path.
// It accepts a template with zero or more URL variables enclosed by {}. The
// template must start with a "/".
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Path("/products/").Handler(ProductsHandler)
// r.Path("/products/{key}").Handler(ProductsHandler)
// r.Path("/articles/{category}/{id:[0-9]+}").
// Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, false, false, false)
return r
}
// PathPrefix -----------------------------------------------------------------
// PathPrefix adds a matcher for the URL path prefix. This matches if the given
// template is a prefix of the full URL path. See Route.Path() for details on
// the tpl argument.
//
// Note that it does not treat slashes specially ("/foobar/" will be matched by
// the prefix "/foo") so you may want to use a trailing slash here.
//
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, false, true, false)
return r
}
// Query ----------------------------------------------------------------------
// Queries adds a matcher for URL query values.
// It accepts a sequence of key/value pairs. Values may define variables.
// For example:
//
// r := mux.NewRouter()
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
//
// The above route will only match if the URL contains the defined query
// values, e.g.: ?foo=bar&id=42.
//
// If the value is an empty string, it will match any value if the key is set.
//
// Variables can define an optional regexp pattern to be matched:
//
//  - {name} matches anything until the next "&" or the end of the query value.
//
// - {name:pattern} matches the given regexp pattern.
func (r *Route) Queries(pairs ...string) *Route {
length := len(pairs)
if length%2 != 0 {
r.err = fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
return nil
}
for i := 0; i < length; i += 2 {
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
return r
}
}
return r
}
// Schemes --------------------------------------------------------------------
// schemeMatcher matches the request against URL schemes.
type schemeMatcher []string
func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.URL.Scheme)
}
// Schemes adds a matcher for URL schemes.
// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
return r.addMatcher(schemeMatcher(schemes))
}
// BuildVarsFunc --------------------------------------------------------------
// BuildVarsFunc is the function signature used by custom build variable
// functions (which can modify route variables before a route's URL is built).
type BuildVarsFunc func(map[string]string) map[string]string
// BuildVarsFunc adds a custom function to be used to modify build variables
// before a route's URL is built.
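//
// For example, a minimal sketch (the route and variable name are illustrative):
//
//    r.Path("/articles/{category}").BuildVarsFunc(func(vars map[string]string) map[string]string {
//        vars["category"] = strings.ToLower(vars["category"])
//        return vars
//    })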
func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
r.buildVarsFunc = f
return r
}
// Subrouter ------------------------------------------------------------------
// Subrouter creates a subrouter for the route.
//
// It will test the inner routes only if the parent route matched. For example:
//
// r := mux.NewRouter()
// s := r.Host("www.example.com").Subrouter()
// s.HandleFunc("/products/", ProductsHandler)
// s.HandleFunc("/products/{key}", ProductHandler)
// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
router := &Router{parent: r, strictSlash: r.strictSlash}
r.addMatcher(router)
return router
}
// ----------------------------------------------------------------------------
// URL building
// ----------------------------------------------------------------------------
// URL builds a URL for the route.
//
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// ...a URL for it can be built using:
//
// url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return an url.URL with the following path:
//
// "/articles/technology/42"
//
// This also works for host variables:
//
// r := mux.NewRouter()
// r.Host("{subdomain}.domain.com").
// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// // url.String() will be "http://news.domain.com/articles/technology/42"
// url, err := r.Get("article").URL("subdomain", "news",
// "category", "technology",
// "id", "42")
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp == nil {
return nil, errors.New("mux: route doesn't have a host or path")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
var scheme, host, path string
if r.regexp.host != nil {
// Set a default scheme.
scheme = "http"
if host, err = r.regexp.host.url(values); err != nil {
return nil, err
}
}
if r.regexp.path != nil {
if path, err = r.regexp.path.url(values); err != nil {
return nil, err
}
}
return &url.URL{
Scheme: scheme,
Host: host,
Path: path,
}, nil
}
// URLHost builds the host part of the URL for a route. See Route.URL().
//
// The route must have a host defined.
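//
// For example, a brief sketch (the host template and values are illustrative):
//
//    r := mux.NewRouter()
//    r.Host("{subdomain}.domain.com").Name("home")
//    u, err := r.Get("home").URLHost("subdomain", "news")
//    // u.String() == "http://news.domain.com"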
func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp == nil || r.regexp.host == nil {
return nil, errors.New("mux: route doesn't have a host")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
host, err := r.regexp.host.url(values)
if err != nil {
return nil, err
}
return &url.URL{
Scheme: "http",
Host: host,
}, nil
}
// URLPath builds the path part of the URL for a route. See Route.URL().
//
// The route must have a path defined.
func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp == nil || r.regexp.path == nil {
return nil, errors.New("mux: route doesn't have a path")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
path, err := r.regexp.path.url(values)
if err != nil {
return nil, err
}
return &url.URL{
Path: path,
}, nil
}
// GetPathTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
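//
// For example, a brief sketch (the route, handler and name are illustrative):
//
//    r := mux.NewRouter()
//    r.HandleFunc("/articles/{id:[0-9]+}", ArticleHandler).Name("article")
//    tpl, _ := r.Get("article").GetPathTemplate() // "/articles/{id:[0-9]+}"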
func (r *Route) GetPathTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp == nil || r.regexp.path == nil {
return "", errors.New("mux: route doesn't have a path")
}
return r.regexp.path.template, nil
}
// GetHostTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a host.
func (r *Route) GetHostTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp == nil || r.regexp.host == nil {
return "", errors.New("mux: route doesn't have a host")
}
return r.regexp.host.template, nil
}
// prepareVars converts the route variable pairs into a map. If the route has a
// BuildVarsFunc, it is invoked.
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
m, err := mapFromPairsToString(pairs...)
if err != nil {
return nil, err
}
return r.buildVars(m), nil
}
func (r *Route) buildVars(m map[string]string) map[string]string {
if r.parent != nil {
m = r.parent.buildVars(m)
}
if r.buildVarsFunc != nil {
m = r.buildVarsFunc(m)
}
return m
}
// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------
// parentRoute allows routes to know about parent host and path definitions.
type parentRoute interface {
getNamedRoutes() map[string]*Route
getRegexpGroup() *routeRegexpGroup
buildVars(map[string]string) map[string]string
}
// getNamedRoutes returns the map where named routes are registered.
func (r *Route) getNamedRoutes() map[string]*Route {
if r.parent == nil {
// During tests router is not always set.
r.parent = NewRouter()
}
return r.parent.getNamedRoutes()
}
// getRegexpGroup returns regexp definitions from this route.
func (r *Route) getRegexpGroup() *routeRegexpGroup {
if r.regexp == nil {
if r.parent == nil {
// During tests router is not always set.
r.parent = NewRouter()
}
regexp := r.parent.getRegexpGroup()
if regexp == nil {
r.regexp = new(routeRegexpGroup)
} else {
// Copy.
r.regexp = &routeRegexpGroup{
host: regexp.host,
path: regexp.path,
queries: regexp.queries,
}
}
}
return r.regexp
}

View file

@ -1,27 +0,0 @@
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,70 +0,0 @@
pat
===
[![GoDoc](https://godoc.org/github.com/gorilla/pat?status.svg)](https://godoc.org/github.com/gorilla/pat)
[![Build Status](https://travis-ci.org/gorilla/pat.svg?branch=master)](https://travis-ci.org/gorilla/pat)
### How to use?
pat is pretty simple. The documentation lives [here](http://www.gorillatoolkit.org/pkg/pat).
### Install
With a properly configured Go toolchain:
```sh
go get github.com/gorilla/pat
```
### Example
Here's an example of a RESTful API:
```go
package main
import (
"log"
"net/http"
"github.com/gorilla/pat"
)
func homeHandler(wr http.ResponseWriter, req *http.Request) {
wr.WriteHeader(http.StatusOK)
wr.Write([]byte("Yay! We're home, Jim!"))
}
func getAllTheThings(wr http.ResponseWriter, req *http.Request) {
wr.WriteHeader(http.StatusOK)
wr.Write([]byte("Look, Jim! Get all the things!"))
}
func putOneThing(wr http.ResponseWriter, req *http.Request) {
wr.WriteHeader(http.StatusOK)
wr.Write([]byte("Look, Jim! Put one thing!"))
}
func deleteOneThing(wr http.ResponseWriter, req *http.Request) {
wr.WriteHeader(http.StatusOK)
wr.Write([]byte("Look, Jim! Delete one thing!"))
}
func main() {
router := pat.New()
router.Get("/things", getAllTheThings)
router.Put("/things/{id}", putOneThing)
router.Delete("/things/{id}", deleteOneThing)
router.Get("/", homeHandler)
http.Handle("/", router)
log.Print("Listening on 127.0.0.1:8000...")
log.Fatal(http.ListenAndServe(":8000", nil))
}
```
Notice how the routes descend? That's because Pat will take the first route
that matches. For your own testing, take the line ```router.Get("/",
homeHandler)``` and put it above the other routes and run the example. When you
try to curl any of the routes, you'll only get what the homeHandler returns.
Design your routes carefully.

67
vendor/github.com/gorilla/pat/doc.go generated vendored
View file

@ -1,67 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package gorilla/pat is a request router and dispatcher with a pat-like
interface. It is an alternative to gorilla/mux that showcases how it can
be used as a base for different API flavors. Package pat is documented at:
http://godoc.org/github.com/bmizerany/pat
Let's start registering a couple of URL paths and handlers:
func main() {
r := pat.New()
r.Get("/products", ProductsHandler)
r.Get("/articles", ArticlesHandler)
r.Get("/", HomeHandler)
http.Handle("/", r)
}
Here we register three routes mapping URL paths to handlers. This is
equivalent to how http.HandleFunc() works: if an incoming GET request matches
one of the paths, the corresponding handler is called passing
(http.ResponseWriter, *http.Request) as parameters.
Note: gorilla/pat matches path prefixes, so you must register the most
specific paths first.
Note: unlike pat, these methods accept a handler function, not an
http.Handler. We think this is shorter and more convenient. To set an
http.Handler, use the Add() method.
Paths can have variables. They are defined using the format {name} or
{name:pattern}. If a regular expression pattern is not defined, the matched
variable will be anything until the next slash. For example:
r := pat.New()
r.Get("/articles/{category}/{id:[0-9]+}", ArticleHandler)
r.Get("/articles/{category}/", ArticlesCategoryHandler)
r.Get("/products/{key}", ProductHandler)
The names are used to create a map of route variables which are stored in the
URL query, prefixed by a colon:
category := req.URL.Query().Get(":category")
As in the gorilla/mux package, other matchers can be added to the registered
routes and URLs can be reversed as well. To build a URL for a route, first
add a name to it:
r.Get("/products/{key}", ProductHandler).Name("product")
Then you can get it using the name and generate a URL:
url, err := r.GetRoute("product").URL("key", "transmogrifier")
...and the result will be a url.URL with the following path:
"/products/transmogrifier"
Check the mux documentation for more details about URL building and extra
matchers:
http://gorilla-web.appspot.com/pkg/mux/
*/
package pat

126
vendor/github.com/gorilla/pat/pat.go generated vendored
View file

@ -1,126 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pat
import (
"net/http"
"net/url"
"path"
"strings"
"github.com/gorilla/context"
"github.com/gorilla/mux"
)
// New returns a new router.
func New() *Router {
return &Router{}
}
// Router is a request router that implements a pat-like API.
//
// pat docs: http://godoc.org/github.com/bmizerany/pat
type Router struct {
mux.Router
}
// Add registers a pattern with a handler for the given request method.
func (r *Router) Add(meth, pat string, h http.Handler) *mux.Route {
return r.NewRoute().PathPrefix(pat).Handler(h).Methods(meth)
}
// Options registers a pattern with a handler for OPTIONS requests.
func (r *Router) Options(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("OPTIONS", pat, h)
}
// Delete registers a pattern with a handler for DELETE requests.
func (r *Router) Delete(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("DELETE", pat, h)
}
// Head registers a pattern with a handler for HEAD requests.
func (r *Router) Head(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("HEAD", pat, h)
}
// Get registers a pattern with a handler for GET requests.
func (r *Router) Get(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("GET", pat, h)
}
// Post registers a pattern with a handler for POST requests.
func (r *Router) Post(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("POST", pat, h)
}
// Put registers a pattern with a handler for PUT requests.
func (r *Router) Put(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("PUT", pat, h)
}
// Patch registers a pattern with a handler for PATCH requests.
func (r *Router) Patch(pat string, h http.HandlerFunc) *mux.Route {
return r.Add("PATCH", pat, h)
}
// ServeHTTP dispatches the handler registered in the matched route.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// Clean path to canonical form and redirect.
if p := cleanPath(req.URL.Path); p != req.URL.Path {
w.Header().Set("Location", p)
w.WriteHeader(http.StatusMovedPermanently)
return
}
var match mux.RouteMatch
var handler http.Handler
if matched := r.Match(req, &match); matched {
handler = match.Handler
registerVars(req, match.Vars)
}
if handler == nil {
if r.NotFoundHandler == nil {
r.NotFoundHandler = http.NotFoundHandler()
}
handler = r.NotFoundHandler
}
if !r.KeepContext {
defer context.Clear(req)
}
handler.ServeHTTP(w, req)
}
// registerVars adds the matched route variables to the URL query.
func registerVars(r *http.Request, vars map[string]string) {
parts, i := make([]string, len(vars)), 0
for key, value := range vars {
parts[i] = url.QueryEscape(":"+key) + "=" + url.QueryEscape(value)
i++
}
q := strings.Join(parts, "&")
if r.URL.RawQuery == "" {
r.URL.RawQuery = q
} else {
r.URL.RawQuery += "&" + q
}
}
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}

View file

@ -1,8 +0,0 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.
Gary Burd <gary@beagledreams.com>
Joachim Bauch <mail@joachim-bauch.de>

View file

@ -1,22 +0,0 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,64 +0,0 @@
# Gorilla WebSocket
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket)
[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
### Documentation
* [API Reference](http://godoc.org/github.com/gorilla/websocket)
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
### Status
The Gorilla WebSocket package provides a complete and tested implementation of
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
package API is stable.
### Installation
go get github.com/gorilla/websocket
### Protocol Compliance
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
### Gorilla WebSocket compared with other packages
<table>
<tr>
<th></th>
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
</tr>
<tr>
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
<tr><td colspan="3">Other Features</tr></td>
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>
Notes:
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
function.
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
Read returns when the input buffer is full or a frame boundary is
encountered. Each call to Write sends a single frame message. The Gorilla
io.Reader and io.WriteCloser operate on a single WebSocket message.

View file

@ -1,392 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/base64"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// ErrBadHandshake is returned when the server response to opening handshake is
// invalid.
var ErrBadHandshake = errors.New("websocket: bad handshake")
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
// NewClient creates a new client connection using the given net connection.
// The URL u specifies the host and request URI. Use requestHeader to specify
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
// (Cookie). Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etc.
//
// Deprecated: Use Dialer instead.
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
d := Dialer{
ReadBufferSize: readBufSize,
WriteBufferSize: writeBufSize,
NetDial: func(net, addr string) (net.Conn, error) {
return netConn, nil
},
}
return d.Dial(u.String(), requestHeader)
}
// A Dialer contains options for connecting to WebSocket server.
type Dialer struct {
// NetDial specifies the dial function for creating TCP connections. If
// NetDial is nil, net.Dial is used.
NetDial func(network, addr string) (net.Conn, error)
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
// If Proxy is nil or returns a nil *URL, no proxy is used.
Proxy func(*http.Request) (*url.URL, error)
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
// If nil, the default configuration is used.
TLSClientConfig *tls.Config
// HandshakeTimeout specifies the duration for the handshake to complete.
HandshakeTimeout time.Duration
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
// size is zero, then a useful default size is used. The I/O buffer sizes
// do not limit the size of the messages that can be sent or received.
ReadBufferSize, WriteBufferSize int
// Subprotocols specifies the client's requested subprotocols.
Subprotocols []string
// EnableCompression specifies if the client should attempt to negotiate
// per message compression (RFC 7692). Setting this value to true does not
// guarantee that compression will be supported. Currently only "no context
// takeover" modes are supported.
EnableCompression bool
// Jar specifies the cookie jar.
// If Jar is nil, cookies are not sent in requests and ignored
// in responses.
Jar http.CookieJar
}
var errMalformedURL = errors.New("malformed ws or wss URL")
// parseURL parses the URL.
//
// This function is a replacement for the standard library url.Parse function.
// In Go 1.4 and earlier, url.Parse loses information from the path.
func parseURL(s string) (*url.URL, error) {
// From the RFC:
//
// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
var u url.URL
switch {
case strings.HasPrefix(s, "ws://"):
u.Scheme = "ws"
s = s[len("ws://"):]
case strings.HasPrefix(s, "wss://"):
u.Scheme = "wss"
s = s[len("wss://"):]
default:
return nil, errMalformedURL
}
if i := strings.Index(s, "?"); i >= 0 {
u.RawQuery = s[i+1:]
s = s[:i]
}
if i := strings.Index(s, "/"); i >= 0 {
u.Opaque = s[i:]
s = s[:i]
} else {
u.Opaque = "/"
}
u.Host = s
if strings.Contains(u.Host, "@") {
// Don't bother parsing user information because user information is
// not allowed in websocket URIs.
return nil, errMalformedURL
}
return &u, nil
}
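// hostPortNoPort returns the host both with a port (appending the scheme's
// default port when the URL has none) and without any port.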
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
hostPort = u.Host
hostNoPort = u.Host
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
hostNoPort = hostNoPort[:i]
} else {
switch u.Scheme {
case "wss":
hostPort += ":443"
case "https":
hostPort += ":443"
default:
hostPort += ":80"
}
}
return hostPort, hostNoPort
}
// DefaultDialer is a dialer with all fields set to the default zero values.
var DefaultDialer = &Dialer{
Proxy: http.ProxyFromEnvironment,
}
// Dial creates a new client connection. Use requestHeader to specify the
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
// Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
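//
// A minimal usage sketch (the URL is illustrative):
//
//    c, _, err := DefaultDialer.Dial("ws://example.com/ws", nil)
//    if err != nil {
//        return err
//    }
//    defer c.Close()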
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
if d == nil {
d = &Dialer{
Proxy: http.ProxyFromEnvironment,
}
}
challengeKey, err := generateChallengeKey()
if err != nil {
return nil, nil, err
}
u, err := parseURL(urlStr)
if err != nil {
return nil, nil, err
}
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
default:
return nil, nil, errMalformedURL
}
if u.User != nil {
// User name and password are not allowed in websocket URIs.
return nil, nil, errMalformedURL
}
req := &http.Request{
Method: "GET",
URL: u,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: u.Host,
}
// Set the cookies present in the cookie jar of the dialer
if d.Jar != nil {
for _, cookie := range d.Jar.Cookies(u) {
req.AddCookie(cookie)
}
}
// Set the request headers using the capitalization for names and values in
// RFC examples. Although the capitalization shouldn't matter, there are
// servers that depend on it. The Header.Set method is not used because the
// method canonicalizes the header names.
req.Header["Upgrade"] = []string{"websocket"}
req.Header["Connection"] = []string{"Upgrade"}
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
req.Header["Sec-WebSocket-Version"] = []string{"13"}
if len(d.Subprotocols) > 0 {
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
}
for k, vs := range requestHeader {
switch {
case k == "Host":
if len(vs) > 0 {
req.Host = vs[0]
}
case k == "Upgrade" ||
k == "Connection" ||
k == "Sec-Websocket-Key" ||
k == "Sec-Websocket-Version" ||
k == "Sec-Websocket-Extensions" ||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
default:
req.Header[k] = vs
}
}
if d.EnableCompression {
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
}
hostPort, hostNoPort := hostPortNoPort(u)
var proxyURL *url.URL
	// Check whether the proxy method has been configured
if d.Proxy != nil {
proxyURL, err = d.Proxy(req)
}
if err != nil {
return nil, nil, err
}
var targetHostPort string
if proxyURL != nil {
targetHostPort, _ = hostPortNoPort(proxyURL)
} else {
targetHostPort = hostPort
}
var deadline time.Time
if d.HandshakeTimeout != 0 {
deadline = time.Now().Add(d.HandshakeTimeout)
}
netDial := d.NetDial
if netDial == nil {
netDialer := &net.Dialer{Deadline: deadline}
netDial = netDialer.Dial
}
netConn, err := netDial("tcp", targetHostPort)
if err != nil {
return nil, nil, err
}
defer func() {
if netConn != nil {
netConn.Close()
}
}()
if err := netConn.SetDeadline(deadline); err != nil {
return nil, nil, err
}
if proxyURL != nil {
connectHeader := make(http.Header)
if user := proxyURL.User; user != nil {
proxyUser := user.Username()
if proxyPassword, passwordSet := user.Password(); passwordSet {
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
}
}
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: hostPort},
Host: hostPort,
Header: connectHeader,
}
connectReq.Write(netConn)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(netConn)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
return nil, nil, err
}
if resp.StatusCode != 200 {
f := strings.SplitN(resp.Status, " ", 2)
return nil, nil, errors.New(f[1])
}
}
if u.Scheme == "https" {
cfg := cloneTLSConfig(d.TLSClientConfig)
if cfg.ServerName == "" {
cfg.ServerName = hostNoPort
}
tlsConn := tls.Client(netConn, cfg)
netConn = tlsConn
if err := tlsConn.Handshake(); err != nil {
return nil, nil, err
}
if !cfg.InsecureSkipVerify {
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
return nil, nil, err
}
}
}
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
if err := req.Write(netConn); err != nil {
return nil, nil, err
}
resp, err := http.ReadResponse(conn.br, req)
if err != nil {
return nil, nil, err
}
if d.Jar != nil {
if rc := resp.Cookies(); len(rc) > 0 {
d.Jar.SetCookies(u, rc)
}
}
if resp.StatusCode != 101 ||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
// Before closing the network connection on return from this
// function, slurp up some of the response to aid application
// debugging.
buf := make([]byte, 1024)
n, _ := io.ReadFull(resp.Body, buf)
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
return nil, resp, ErrBadHandshake
}
for _, ext := range parseExtensions(resp.Header) {
if ext[""] != "permessage-deflate" {
continue
}
_, snct := ext["server_no_context_takeover"]
_, cnct := ext["client_no_context_takeover"]
if !snct || !cnct {
return nil, resp, errInvalidCompression
}
conn.newCompressionWriter = compressNoContextTakeover
conn.newDecompressionReader = decompressNoContextTakeover
break
}
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
netConn.SetDeadline(time.Time{})
netConn = nil // to avoid close in defer.
return conn, resp, nil
}

View file

@ -1,16 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package websocket
import "crypto/tls"
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return cfg.Clone()
}

View file

@ -1,38 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.8
package websocket
import "crypto/tls"
// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
if cfg == nil {
return &tls.Config{}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
}

View file

@ -1,148 +0,0 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"compress/flate"
"errors"
"io"
"strings"
"sync"
)
const (
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
maxCompressionLevel = flate.BestCompression
defaultCompressionLevel = 1
)
var (
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
flateReaderPool = sync.Pool{New: func() interface{} {
return flate.NewReader(nil)
}}
)
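// decompressNoContextTakeover returns a reader that decompresses a single
// message from r without retaining dictionary state across messages.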
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
const tail =
// Add four bytes as specified in RFC
"\x00\x00\xff\xff" +
// Add final block to squelch unexpected EOF error from flate reader.
"\x01\x00\x00\xff\xff"
fr, _ := flateReaderPool.Get().(io.ReadCloser)
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
return &flateReadWrapper{fr}
}
func isValidCompressionLevel(level int) bool {
return minCompressionLevel <= level && level <= maxCompressionLevel
}
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
p := &flateWriterPools[level-minCompressionLevel]
tw := &truncWriter{w: w}
fw, _ := p.Get().(*flate.Writer)
if fw == nil {
fw, _ = flate.NewWriter(tw, level)
} else {
fw.Reset(tw)
}
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
}
// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer.
type truncWriter struct {
w io.WriteCloser
n int
p [4]byte
}
func (w *truncWriter) Write(p []byte) (int, error) {
n := 0
// fill buffer first for simplicity.
if w.n < len(w.p) {
n = copy(w.p[w.n:], p)
p = p[n:]
w.n += n
if len(p) == 0 {
return n, nil
}
}
m := len(p)
if m > len(w.p) {
m = len(w.p)
}
if nn, err := w.w.Write(w.p[:m]); err != nil {
return n + nn, err
}
copy(w.p[:], w.p[m:])
copy(w.p[len(w.p)-m:], p[len(p)-m:])
nn, err := w.w.Write(p[:len(p)-m])
return n + nn, err
}
type flateWriteWrapper struct {
fw *flate.Writer
tw *truncWriter
p *sync.Pool
}
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
if w.fw == nil {
return 0, errWriteClosed
}
return w.fw.Write(p)
}
func (w *flateWriteWrapper) Close() error {
if w.fw == nil {
return errWriteClosed
}
err1 := w.fw.Flush()
w.p.Put(w.fw)
w.fw = nil
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
}
err2 := w.tw.w.Close()
if err1 != nil {
return err1
}
return err2
}
type flateReadWrapper struct {
fr io.ReadCloser
}
func (r *flateReadWrapper) Read(p []byte) (int, error) {
if r.fr == nil {
return 0, io.ErrClosedPipe
}
n, err := r.fr.Read(p)
if err == io.EOF {
// Preemptively place the reader back in the pool. This helps with
// scenarios where the application does not call NextReader() soon after
// this final read.
r.Close()
}
return n, err
}
func (r *flateReadWrapper) Close() error {
if r.fr == nil {
return io.ErrClosedPipe
}
err := r.fr.Close()
flateReaderPool.Put(r.fr)
r.fr = nil
return err
}

File diff suppressed because it is too large

View file

@ -1,18 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package websocket
import "io"
func (c *Conn) read(n int) ([]byte, error) {
p, err := c.br.Peek(n)
if err == io.EOF {
err = errUnexpectedEOF
}
c.br.Discard(len(p))
return p, err
}

View file

@ -1,21 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package websocket
import "io"
func (c *Conn) read(n int) ([]byte, error) {
p, err := c.br.Peek(n)
if err == io.EOF {
err = errUnexpectedEOF
}
if len(p) > 0 {
// advance over the bytes just read
io.ReadFull(c.br, p)
}
return p, err
}

View file

@ -1,180 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package websocket implements the WebSocket protocol defined in RFC 6455.
//
// Overview
//
// The Conn type represents a WebSocket connection. A server application uses
// the Upgrade function from an Upgrader object with an HTTP request handler
// to get a pointer to a Conn:
//
// var upgrader = websocket.Upgrader{
// ReadBufferSize: 1024,
// WriteBufferSize: 1024,
// }
//
// func handler(w http.ResponseWriter, r *http.Request) {
// conn, err := upgrader.Upgrade(w, r, nil)
// if err != nil {
// log.Println(err)
// return
// }
// ... Use conn to send and receive messages.
// }
//
// Call the connection's WriteMessage and ReadMessage methods to send and
// receive messages as a slice of bytes. This snippet of code shows how to echo
// messages using these methods:
//
// for {
// messageType, p, err := conn.ReadMessage()
// if err != nil {
// return
// }
// if err = conn.WriteMessage(messageType, p); err != nil {
// return err
// }
// }
//
// In the above snippet, p is a []byte and messageType is an int with value
// websocket.BinaryMessage or websocket.TextMessage.
//
// An application can also send and receive messages using the io.WriteCloser
// and io.Reader interfaces. To send a message, call the connection NextWriter
// method to get an io.WriteCloser, write the message to the writer and close
// the writer when done. To receive a message, call the connection NextReader
// method to get an io.Reader and read until io.EOF is returned. This snippet
// shows how to echo messages using the NextWriter and NextReader methods:
//
// for {
// messageType, r, err := conn.NextReader()
// if err != nil {
// return
// }
// w, err := conn.NextWriter(messageType)
// if err != nil {
// return err
// }
// if _, err := io.Copy(w, r); err != nil {
// return err
// }
// if err := w.Close(); err != nil {
// return err
// }
// }
//
// Data Messages
//
// The WebSocket protocol distinguishes between text and binary data messages.
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
// binary messages is left to the application.
//
// This package uses the TextMessage and BinaryMessage integer constants to
// identify the two data message types. The ReadMessage and NextReader methods
// return the type of the received message. The messageType argument to the
// WriteMessage and NextWriter methods specifies the type of a sent message.
//
// It is the application's responsibility to ensure that text messages are
// valid UTF-8 encoded text.
//
// Control Messages
//
// The WebSocket protocol defines three types of control messages: close, ping
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
// methods to send a control message to the peer.
//
// Connections handle received close messages by sending a close message to the
// peer and returning a *CloseError from the NextReader, ReadMessage or the
// message Read method.
//
// Connections handle received ping and pong messages by invoking callback
// functions set with SetPingHandler and SetPongHandler methods. The callback
// functions are called from the NextReader, ReadMessage and the message Read
// methods.
//
// The default ping handler sends a pong to the peer. The application's reading
// goroutine can block for a short time while the handler writes the pong data
// to the connection.
//
// The application must read the connection to process ping, pong and close
// messages sent from the peer. If the application is not otherwise interested
// in messages from the peer, then the application should start a goroutine to
// read and discard messages from the peer. A simple example is:
//
// func readLoop(c *websocket.Conn) {
// for {
// if _, _, err := c.NextReader(); err != nil {
// c.Close()
// break
// }
// }
// }
//
// Concurrency
//
// Connections support one concurrent reader and one concurrent writer.
//
// Applications are responsible for ensuring that no more than one goroutine
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
// that no more than one goroutine calls the read methods (NextReader,
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
// concurrently.
//
// The Close and WriteControl methods can be called concurrently with all other
// methods.
//
// Origin Considerations
//
// Web browsers allow Javascript applications to open a WebSocket connection to
// any host. It's up to the server to enforce an origin policy using the Origin
// request header sent by the browser.
//
// The Upgrader calls the function specified in the CheckOrigin field to check
// the origin. If the CheckOrigin function returns false, then the Upgrade
// method fails the WebSocket handshake with HTTP status 403.
//
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
// the handshake if the Origin request header is present and not equal to the
// Host request header.
//
// An application can allow connections from any origin by specifying a
// function that always returns true:
//
// var upgrader = websocket.Upgrader{
// CheckOrigin: func(r *http.Request) bool { return true },
// }
//
// The deprecated Upgrade function does not enforce an origin policy. It's the
// application's responsibility to check the Origin header before calling
// Upgrade.
//
// Compression EXPERIMENTAL
//
// Per message compression extensions (RFC 7692) are experimentally supported
// by this package in a limited capacity. Setting the EnableCompression option
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
// support.
//
// var upgrader = websocket.Upgrader{
// EnableCompression: true,
// }
//
// If compression was successfully negotiated with the connection's peer, any
// message received in compressed form will be automatically decompressed.
// All Read methods will return uncompressed bytes.
//
// Per message compression of messages written to a connection can be enabled
// or disabled by calling the corresponding Conn method:
//
// conn.EnableWriteCompression(false)
//
// Currently this package does not support compression with "context takeover".
// This means that messages must be compressed and decompressed in isolation,
// without retaining sliding window or dictionary state across messages. For
// more details refer to RFC 7692.
//
// Use of compression is experimental and may result in decreased performance.
package websocket

View file

@ -1,55 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"encoding/json"
"io"
)
// WriteJSON is deprecated, use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
return c.WriteJSON(v)
}
// WriteJSON writes the JSON encoding of v to the connection.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
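//
// A brief sketch (the payload value is illustrative):
//
//    if err := c.WriteJSON(map[string]string{"event": "ping"}); err != nil {
//        return err
//    }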
func (c *Conn) WriteJSON(v interface{}) error {
w, err := c.NextWriter(TextMessage)
if err != nil {
return err
}
err1 := json.NewEncoder(w).Encode(v)
err2 := w.Close()
if err1 != nil {
return err1
}
return err2
}
// ReadJSON is deprecated, use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
return c.ReadJSON(v)
}
// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
_, r, err := c.NextReader()
if err != nil {
return err
}
err = json.NewDecoder(r).Decode(v)
if err == io.EOF {
// One value is expected in the message.
err = io.ErrUnexpectedEOF
}
return err
}

View file

@ -1,55 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.
// +build !appengine
package websocket
import "unsafe"
const wordSize = int(unsafe.Sizeof(uintptr(0)))
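// maskBytes applies the WebSocket masking key to b in place, starting at key
// offset pos, and returns the key offset to use for the next call.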
func maskBytes(key [4]byte, pos int, b []byte) int {
// Mask one byte at a time for small buffers.
if len(b) < 2*wordSize {
for i := range b {
b[i] ^= key[pos&3]
pos++
}
return pos & 3
}
// Mask one byte at a time to word boundary.
if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
n = wordSize - n
for i := range b[:n] {
b[i] ^= key[pos&3]
pos++
}
b = b[n:]
}
// Create aligned word size key.
var k [wordSize]byte
for i := range k {
k[i] = key[(pos+i)&3]
}
kw := *(*uintptr)(unsafe.Pointer(&k))
// Mask one word at a time.
n := (len(b) / wordSize) * wordSize
for i := 0; i < n; i += wordSize {
*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
}
// Mask one byte at a time for remaining bytes.
b = b[n:]
for i := range b {
b[i] ^= key[pos&3]
pos++
}
return pos & 3
}

View file

@ -1,15 +0,0 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.
// +build appengine
package websocket
func maskBytes(key [4]byte, pos int, b []byte) int {
for i := range b {
b[i] ^= key[pos&3]
pos++
}
return pos & 3
}

View file

@ -1,103 +0,0 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bytes"
"net"
"sync"
"time"
)
// PreparedMessage caches the on-the-wire representations of a message payload.
// Use PreparedMessage to efficiently send a message payload to multiple
// connections. PreparedMessage is especially useful when compression is used,
// because the CPU- and memory-expensive compression operation can be executed
// once for a given set of compression options.
type PreparedMessage struct {
messageType int
data []byte
err error
mu sync.Mutex
frames map[prepareKey]*preparedFrame
}
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
type prepareKey struct {
isServer bool
compress bool
compressionLevel int
}
// preparedFrame contains data in wire representation.
type preparedFrame struct {
once sync.Once
data []byte
}
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to a connection using the WritePreparedMessage method. The valid wire
// representation will be calculated lazily, only once for a given set of
// connection options.
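//
// A minimal sketch (conns is an assumed slice of *Conn):
//
//    pm, err := NewPreparedMessage(TextMessage, []byte("hello"))
//    if err != nil {
//        return err
//    }
//    for _, c := range conns {
//        c.WritePreparedMessage(pm)
//    }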
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
pm := &PreparedMessage{
messageType: messageType,
frames: make(map[prepareKey]*preparedFrame),
data: data,
}
// Prepare a plain server frame.
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
if err != nil {
return nil, err
}
// To protect against caller modifying the data argument, remember the data
// copied to the plain server frame.
pm.data = frameData[len(frameData)-len(data):]
return pm, nil
}
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
pm.mu.Lock()
frame, ok := pm.frames[key]
if !ok {
frame = &preparedFrame{}
pm.frames[key] = frame
}
pm.mu.Unlock()
var err error
frame.once.Do(func() {
// Prepare a frame using a 'fake' connection.
// TODO: Refactor code in conn.go to allow more direct construction of
// the frame.
mu := make(chan bool, 1)
mu <- true
var nc prepareConn
c := &Conn{
conn: &nc,
mu: mu,
isServer: key.isServer,
compressionLevel: key.compressionLevel,
enableWriteCompression: true,
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
}
if key.compress {
c.newCompressionWriter = compressNoContextTakeover
}
err = c.WriteMessage(pm.messageType, pm.data)
frame.data = nc.buf.Bytes()
})
return pm.messageType, frame.data, err
}
type prepareConn struct {
buf bytes.Buffer
net.Conn
}
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }

View file

@ -1,291 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"bufio"
"errors"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// HandshakeError describes an error with the handshake from the peer.
type HandshakeError struct {
message string
}
func (e HandshakeError) Error() string { return e.message }
// Upgrader specifies parameters for upgrading an HTTP connection to a
// WebSocket connection.
type Upgrader struct {
// HandshakeTimeout specifies the duration for the handshake to complete.
HandshakeTimeout time.Duration
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
// size is zero, then buffers allocated by the HTTP server are used. The
// I/O buffer sizes do not limit the size of the messages that can be sent
// or received.
ReadBufferSize, WriteBufferSize int
// Subprotocols specifies the server's supported protocols in order of
// preference. If this field is set, then the Upgrade method negotiates a
// subprotocol by selecting the first match in this list with a protocol
// requested by the client.
Subprotocols []string
// Error specifies the function for generating HTTP error responses. If Error
// is nil, then http.Error is used to generate the HTTP response.
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
// CheckOrigin returns true if the request Origin header is acceptable. If
// CheckOrigin is nil, the host in the Origin header must not be set or
// must match the host of the request.
CheckOrigin func(r *http.Request) bool
// EnableCompression specifies whether the server should attempt to negotiate
// per-message compression (RFC 7692). Setting this value to true does not
// guarantee that compression will be supported. Currently only "no context
// takeover" modes are supported.
EnableCompression bool
}
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
err := HandshakeError{reason}
if u.Error != nil {
u.Error(w, r, status, err)
} else {
w.Header().Set("Sec-Websocket-Version", "13")
http.Error(w, http.StatusText(status), status)
}
return nil, err
}
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
func checkSameOrigin(r *http.Request) bool {
origin := r.Header["Origin"]
if len(origin) == 0 {
return true
}
u, err := url.Parse(origin[0])
if err != nil {
return false
}
return u.Host == r.Host
}
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
if u.Subprotocols != nil {
clientProtocols := Subprotocols(r)
for _, serverProtocol := range u.Subprotocols {
for _, clientProtocol := range clientProtocols {
if clientProtocol == serverProtocol {
return clientProtocol
}
}
}
} else if responseHeader != nil {
return responseHeader.Get("Sec-Websocket-Protocol")
}
return ""
}
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// application negotiated subprotocol (Sec-Websocket-Protocol).
//
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
if r.Method != "GET" {
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
}
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
}
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
}
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
}
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
}
checkOrigin := u.CheckOrigin
if checkOrigin == nil {
checkOrigin = checkSameOrigin
}
if !checkOrigin(r) {
return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
}
challengeKey := r.Header.Get("Sec-Websocket-Key")
if challengeKey == "" {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-Websocket-Key' header is missing or blank")
}
subprotocol := u.selectSubprotocol(r, responseHeader)
// Negotiate PMCE
var compress bool
if u.EnableCompression {
for _, ext := range parseExtensions(r.Header) {
if ext[""] != "permessage-deflate" {
continue
}
compress = true
break
}
}
var (
netConn net.Conn
err error
)
h, ok := w.(http.Hijacker)
if !ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
}
var brw *bufio.ReadWriter
netConn, brw, err = h.Hijack()
if err != nil {
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
}
if brw.Reader.Buffered() > 0 {
netConn.Close()
return nil, errors.New("websocket: client sent data before handshake is complete")
}
c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
c.subprotocol = subprotocol
if compress {
c.newCompressionWriter = compressNoContextTakeover
c.newDecompressionReader = decompressNoContextTakeover
}
p := c.writeBuf[:0]
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
p = append(p, computeAcceptKey(challengeKey)...)
p = append(p, "\r\n"...)
if c.subprotocol != "" {
p = append(p, "Sec-Websocket-Protocol: "...)
p = append(p, c.subprotocol...)
p = append(p, "\r\n"...)
}
if compress {
p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
}
for k, vs := range responseHeader {
if k == "Sec-Websocket-Protocol" {
continue
}
for _, v := range vs {
p = append(p, k...)
p = append(p, ": "...)
for i := 0; i < len(v); i++ {
b := v[i]
if b <= 31 {
// prevent response splitting.
b = ' '
}
p = append(p, b)
}
p = append(p, "\r\n"...)
}
}
p = append(p, "\r\n"...)
// Clear deadlines set by HTTP server.
netConn.SetDeadline(time.Time{})
if u.HandshakeTimeout > 0 {
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
}
if _, err = netConn.Write(p); err != nil {
netConn.Close()
return nil, err
}
if u.HandshakeTimeout > 0 {
netConn.SetWriteDeadline(time.Time{})
}
return c, nil
}
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// This function is deprecated, use websocket.Upgrader instead.
//
// The application is responsible for checking the request origin before
// calling Upgrade. An example implementation of the same origin policy is:
//
// if req.Header.Get("Origin") != "http://"+req.Host {
// http.Error(w, "Origin not allowed", 403)
// return
// }
//
// If the endpoint supports subprotocols, then the application is responsible
// for negotiating the protocol used on the connection. Use the Subprotocols()
// function to get the subprotocols requested by the client. Use the
// Sec-Websocket-Protocol response header to specify the subprotocol selected
// by the application.
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// negotiated subprotocol (Sec-Websocket-Protocol).
//
// The connection buffers IO to the underlying network connection. The
// readBufSize and writeBufSize parameters specify the size of the buffers to
// use. Messages can be larger than the buffers.
//
// If the request is not a valid WebSocket handshake, then Upgrade returns an
// error of type HandshakeError. Applications should handle this error by
// replying to the client with an HTTP error response.
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
// don't return errors to maintain backwards compatibility
}
u.CheckOrigin = func(r *http.Request) bool {
// allow all connections by default
return true
}
return u.Upgrade(w, r, responseHeader)
}
// Subprotocols returns the subprotocols requested by the client in the
// Sec-Websocket-Protocol header.
func Subprotocols(r *http.Request) []string {
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
if h == "" {
return nil
}
protocols := strings.Split(h, ",")
for i := range protocols {
protocols[i] = strings.TrimSpace(protocols[i])
}
return protocols
}
// IsWebSocketUpgrade returns true if the client requested upgrade to the
// WebSocket protocol.
func IsWebSocketUpgrade(r *http.Request) bool {
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
tokenListContainsValue(r.Header, "Upgrade", "websocket")
}
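A minimal echo-server sketch (not part of the vendored file) showing how the Upgrader above is normally used; ReadMessage and WriteMessage are Conn methods defined elsewhere in the package.
```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already replied to the client with an HTTP error.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```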

View file

@ -1,214 +0,0 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package websocket
import (
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"io"
"net/http"
"strings"
)
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
func computeAcceptKey(challengeKey string) string {
h := sha1.New()
h.Write([]byte(challengeKey))
h.Write(keyGUID)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func generateChallengeKey() (string, error) {
p := make([]byte, 16)
if _, err := io.ReadFull(rand.Reader, p); err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(p), nil
}
// Octet types from RFC 2616.
var octetTypes [256]byte
const (
isTokenOctet = 1 << iota
isSpaceOctet
)
func init() {
// From RFC 2616
//
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t byte
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
t |= isSpaceOctet
}
if isChar && !isCtl && !isSeparator {
t |= isTokenOctet
}
octetTypes[c] = t
}
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpaceOctet == 0 {
break
}
}
return s[i:]
}
func nextToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isTokenOctet == 0 {
break
}
}
return s[:i], s[i:]
}
func nextTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return nextToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j += 1
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j += 1
}
}
return "", ""
}
}
return "", ""
}
// tokenListContainsValue returns true if the 1#token header with the given
// name contains token.
func tokenListContainsValue(header http.Header, name string, value string) bool {
headers:
for _, s := range header[name] {
for {
var t string
t, s = nextToken(skipSpace(s))
if t == "" {
continue headers
}
s = skipSpace(s)
if s != "" && s[0] != ',' {
continue headers
}
if strings.EqualFold(t, value) {
return true
}
if s == "" {
continue headers
}
s = s[1:]
}
}
return false
}
// parseExtensions parses WebSocket extensions from a header.
func parseExtensions(header http.Header) []map[string]string {
// From RFC 6455:
//
// Sec-WebSocket-Extensions = extension-list
// extension-list = 1#extension
// extension = extension-token *( ";" extension-param )
// extension-token = registered-token
// registered-token = token
// extension-param = token [ "=" (token | quoted-string) ]
// ;When using the quoted-string syntax variant, the value
// ;after quoted-string unescaping MUST conform to the
// ;'token' ABNF.
var result []map[string]string
headers:
for _, s := range header["Sec-Websocket-Extensions"] {
for {
var t string
t, s = nextToken(skipSpace(s))
if t == "" {
continue headers
}
ext := map[string]string{"": t}
for {
s = skipSpace(s)
if !strings.HasPrefix(s, ";") {
break
}
var k string
k, s = nextToken(skipSpace(s[1:]))
if k == "" {
continue headers
}
s = skipSpace(s)
var v string
if strings.HasPrefix(s, "=") {
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
s = skipSpace(s)
}
if s != "" && s[0] != ',' && s[0] != ';' {
continue headers
}
ext[k] = v
}
if s != "" && s[0] != ',' {
continue headers
}
result = append(result, ext)
if s == "" {
continue headers
}
s = s[1:]
}
}
return result
}

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,30 +0,0 @@
envconf
=======
Configure your Go application from the environment.
Supports most basic Go types and works nicely with the built in `flag` package.
```go
package main
import(
"flag"
"fmt"
. "github.com/ian-kent/envconf"
)
func main() {
count := flag.Int("count", FromEnvP("COUNT", 15).(int), "Count target")
flag.Parse()
for i := 1; i <= *count; i++ {
fmt.Printf("%d\n", i)
}
}
```
## Licence
Copyright © 2014, Ian Kent (http://iankent.uk).
Released under MIT license, see [LICENSE](LICENSE.md) for details.

View file

@ -1,88 +0,0 @@
package envconf
import (
"errors"
"os"
"strconv"
"strings"
)
var (
// ErrUnsupportedType is returned if the type passed in is unsupported
ErrUnsupportedType = errors.New("Unsupported type")
)
// FromEnvP is the same as FromEnv, but panics on error
func FromEnvP(env string, value interface{}) interface{} {
ev, err := FromEnv(env, value)
if err != nil {
panic(err)
}
return ev
}
// FromEnv returns the value of the environment variable specified by env,
// converted to the type of value. If the variable is unset, value is
// returned unchanged.
func FromEnv(env string, value interface{}) (interface{}, error) {
envs := os.Environ()
found := false
for _, e := range envs {
if strings.HasPrefix(e, env+"=") {
found = true
break
}
}
if !found {
return value, nil
}
ev := os.Getenv(env)
switch value.(type) {
case string:
vt := interface{}(ev)
return vt, nil
case int:
i, e := strconv.ParseInt(ev, 10, 64)
return int(i), e
case int8:
i, e := strconv.ParseInt(ev, 10, 8)
return int8(i), e
case int16:
i, e := strconv.ParseInt(ev, 10, 16)
return int16(i), e
case int32:
i, e := strconv.ParseInt(ev, 10, 32)
return int32(i), e
case int64:
i, e := strconv.ParseInt(ev, 10, 64)
return i, e
case uint:
i, e := strconv.ParseUint(ev, 10, 64)
return uint(i), e
case uint8:
i, e := strconv.ParseUint(ev, 10, 8)
return uint8(i), e
case uint16:
i, e := strconv.ParseUint(ev, 10, 16)
return uint16(i), e
case uint32:
i, e := strconv.ParseUint(ev, 10, 32)
return uint32(i), e
case uint64:
i, e := strconv.ParseUint(ev, 10, 64)
return i, e
case float32:
i, e := strconv.ParseFloat(ev, 32)
return float32(i), e
case float64:
i, e := strconv.ParseFloat(ev, 64)
return float64(i), e
case bool:
i, e := strconv.ParseBool(ev)
return i, e
default:
return value, ErrUnsupportedType
}
}
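A short sketch of FromEnv itself (the README above demonstrates FromEnvP); "PORT" is an illustrative variable name.
```go
// Returns 8080 when PORT is unset; returns an error when PORT is set but
// cannot be parsed as an int.
port, err := envconf.FromEnv("PORT", 8080)
if err != nil {
	log.Fatalf("invalid PORT: %s", err)
}
log.Printf("listening on :%d", port.(int)) // port is an int because the default is an int
```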

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,22 +0,0 @@
package appenders
/*
Appenders control the flow of data from a logger to an output.
For example, a Console appender outputs log data to stdout.
Satisfy the Appender interface to implement your own log appender.
*/
import (
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
)
type Appender interface {
Write(level levels.LogLevel, message string, args ...interface{})
Layout() layout.Layout
SetLayout(layout.Layout)
}
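As a sketch of the "implement your own appender" note above (not part of the vendored package), a hypothetical appender that writes formatted lines to any io.Writer:
```go
package appenders

import (
	"fmt"
	"io"

	"github.com/ian-kent/go-log/layout"
	"github.com/ian-kent/go-log/levels"
)

// writerAppender sends formatted log lines to an arbitrary io.Writer.
type writerAppender struct {
	out    io.Writer
	layout layout.Layout
}

func Writer(out io.Writer) *writerAppender {
	return &writerAppender{out: out, layout: layout.Default()}
}

func (a *writerAppender) Write(level levels.LogLevel, message string, args ...interface{}) {
	fmt.Fprintln(a.out, a.Layout().Format(level, message, args...))
}

func (a *writerAppender) Layout() layout.Layout     { return a.layout }
func (a *writerAppender) SetLayout(l layout.Layout) { a.layout = l }
```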

View file

@ -1,31 +0,0 @@
package appenders
import (
"fmt"
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
)
type consoleAppender struct {
Appender
layout layout.Layout
}
func Console() *consoleAppender {
a := &consoleAppender{
layout: layout.Default(),
}
return a
}
func (a *consoleAppender) Write(level levels.LogLevel, message string, args ...interface{}) {
fmt.Println(a.Layout().Format(level, message, args...))
}
func (a *consoleAppender) Layout() layout.Layout {
return a.layout
}
func (a *consoleAppender) SetLayout(layout layout.Layout) {
a.layout = layout
}

View file

@ -1,64 +0,0 @@
package appenders
// TODO add tests
import (
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
"github.com/t-k/fluent-logger-golang/fluent"
)
type fluentdAppender struct {
Appender
layout layout.Layout
fluent *fluent.Fluent
fluentConfig fluent.Config
}
func SafeFluentd(config fluent.Config) (*fluentdAppender, error) {
a := &fluentdAppender{
layout: layout.Default(),
fluentConfig: config,
}
if err := a.Open(); err != nil {
return nil, err
}
return a, nil
}
func Fluentd(config fluent.Config) *fluentdAppender {
a, _ := SafeFluentd(config)
return a
}
func (a *fluentdAppender) Close() {
a.fluent.Close()
a.fluent = nil
}
func (a *fluentdAppender) Open() error {
f, err := fluent.New(a.fluentConfig)
if err != nil {
return err
}
a.fluent = f
return nil
}
func (a *fluentdAppender) Write(level levels.LogLevel, message string, args ...interface{}) {
// FIXME
// - use tag instead of "go-log"
// - get layout to return the map
var data = map[string]string{
"message": a.Layout().Format(level, message, args...),
}
a.fluent.Post("go-log", data)
}
func (a *fluentdAppender) Layout() layout.Layout {
return a.layout
}
func (a *fluentdAppender) SetLayout(layout layout.Layout) {
a.layout = layout
}

View file

@ -1,32 +0,0 @@
package appenders
import (
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
)
type multipleAppender struct {
currentLayout layout.Layout
listOfAppenders []Appender
}
func Multiple(layout layout.Layout, appenders ...Appender) Appender {
return &multipleAppender{
listOfAppenders: appenders,
currentLayout: layout,
}
}
func (this *multipleAppender) Layout() layout.Layout {
return this.currentLayout
}
func (this *multipleAppender) SetLayout(l layout.Layout) {
this.currentLayout = l
}
func (this *multipleAppender) Write(level levels.LogLevel, message string, args ...interface{}) {
for _, appender := range this.listOfAppenders {
appender.Write(level, message, args...)
}
}

View file

@ -1,122 +0,0 @@
package appenders
import (
"fmt"
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
"os"
"strconv"
"strings"
"sync"
)
type rollingFileAppender struct {
Appender
layout layout.Layout
MaxFileSize int64
MaxBackupIndex int
filename string
file *os.File
append bool
writeMutex sync.Mutex
bytesWritten int64
}
func RollingFile(filename string, append bool) *rollingFileAppender {
a := &rollingFileAppender{
layout: layout.Default(),
MaxFileSize: 104857600,
MaxBackupIndex: 1,
append: append,
bytesWritten: 0,
}
err := a.SetFilename(filename)
if err != nil {
fmt.Printf("Error opening file: %s\n", err)
return nil
}
return a
}
func (a *rollingFileAppender) Close() {
if a.file != nil {
a.file.Close()
a.file = nil
}
}
func (a *rollingFileAppender) Write(level levels.LogLevel, message string, args ...interface{}) {
m := a.Layout().Format(level, message, args...)
if !strings.HasSuffix(m, "\n") {
m += "\n"
}
a.writeMutex.Lock()
a.file.Write([]byte(m))
a.bytesWritten += int64(len(m))
if a.bytesWritten >= a.MaxFileSize {
a.bytesWritten = 0
a.rotateFile()
}
a.writeMutex.Unlock()
}
func (a *rollingFileAppender) Layout() layout.Layout {
return a.layout
}
func (a *rollingFileAppender) SetLayout(layout layout.Layout) {
a.layout = layout
}
func (a *rollingFileAppender) Filename() string {
return a.filename
}
func (a *rollingFileAppender) SetFilename(filename string) error {
if a.filename != filename || a.file == nil {
a.closeFile()
a.filename = filename
err := a.openFile()
return err
}
return nil
}
func (a *rollingFileAppender) rotateFile() {
a.closeFile()
lastFile := a.filename + "." + strconv.Itoa(a.MaxBackupIndex)
if _, err := os.Stat(lastFile); err == nil {
os.Remove(lastFile)
}
for n := a.MaxBackupIndex; n > 0; n-- {
f1 := a.filename + "." + strconv.Itoa(n)
f2 := a.filename + "." + strconv.Itoa(n+1)
os.Rename(f1, f2)
}
os.Rename(a.filename, a.filename+".1")
a.openFile()
}
func (a *rollingFileAppender) closeFile() {
if a.file != nil {
a.file.Close()
a.file = nil
}
}
func (a *rollingFileAppender) openFile() error {
mode := os.O_WRONLY | os.O_APPEND | os.O_CREATE
if !a.append {
mode = os.O_WRONLY | os.O_CREATE
}
f, err := os.OpenFile(a.filename, mode, 0666)
a.file = f
return err
}
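A usage sketch (assuming the log and logger packages from this repo); the path and limits are illustrative:
```go
a := appenders.RollingFile("/var/log/myapp.log", true)
a.MaxFileSize = 10 * 1024 * 1024 // rotate after roughly 10MB
a.MaxBackupIndex = 5             // keep myapp.log.1 .. myapp.log.5
log.Logger().SetAppender(a)
```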

View file

@ -1 +0,0 @@
Yet another test

View file

@ -1,2 +0,0 @@
Test message
Another test

View file

@ -1,18 +0,0 @@
package layout
import (
"fmt"
"github.com/ian-kent/go-log/levels"
)
type basicLayout struct {
Layout
}
func Basic() *basicLayout {
return &basicLayout{}
}
func (a *basicLayout) Format(level levels.LogLevel, message string, args ...interface{}) string {
return fmt.Sprintf(message, args...)
}

View file

@ -1,24 +0,0 @@
package layout
/*
Layouts control the formatting of data into a printable log string.
For example, the Basic layout passes the log message and arguments
through fmt.Sprintf.
Satisfy the Layout interface to implement your own log layout.
*/
import (
"github.com/ian-kent/go-log/levels"
)
type Layout interface {
Format(level levels.LogLevel, message string, args ...interface{}) string
}
func Default() Layout {
return Basic()
}
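A sketch of the "implement your own layout" note above (not part of the vendored package); it prefixes every formatted message with its level name:
```go
package layout

import (
	"fmt"

	"github.com/ian-kent/go-log/levels"
)

// levelLayout is a hypothetical layout that prefixes each formatted
// message with its log level.
type levelLayout struct{}

func LevelPrefix() *levelLayout { return &levelLayout{} }

func (l *levelLayout) Format(level levels.LogLevel, message string, args ...interface{}) string {
	return "[" + levels.LogLevelsToString[level] + "] " + fmt.Sprintf(message, args...)
}
```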

View file

@ -1,112 +0,0 @@
package layout
import (
"fmt"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/ian-kent/go-log/levels"
)
// http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html
// DefaultTimeLayout is the default layout used by %d
var DefaultTimeLayout = "2006-01-02 15:04:05.000000000 -0700 MST"
// LegacyDefaultTimeLayout is the legacy (non-zero padded) time layout.
// Set layout.DefaultTimeLayout = layout.LegacyDefaultTimeLayout to revert behaviour.
var LegacyDefaultTimeLayout = "2006-01-02 15:04:05.999999999 -0700 MST"
type patternLayout struct {
Layout
Pattern string
created int64
re *regexp.Regexp
}
type caller struct {
pc uintptr
file string
line int
ok bool
pkg string
fullpkg string
filename string
}
func Pattern(pattern string) *patternLayout {
return &patternLayout{
Pattern: pattern,
re: regexp.MustCompile("%(\\w|%)(?:{([^}]+)})?"),
created: time.Now().UnixNano(),
}
}
func getCaller() *caller {
pc, file, line, ok := runtime.Caller(2)
// TODO feels nasty?
dir, fn := filepath.Split(file)
bits := strings.Split(dir, "/")
pkg := bits[len(bits)-2]
if ok {
return &caller{pc, file, line, ok, pkg, pkg, fn}
}
return nil
}
func (a *patternLayout) Format(level levels.LogLevel, message string, args ...interface{}) string {
// TODO
// padding, e.g. %20c, %-20c, %.30c, %20.30c, %-20.30c
// %t - thread name
// %M - function name
caller := getCaller()
r := time.Now().UnixNano()
msg := a.re.ReplaceAllStringFunc(a.Pattern, func(m string) string {
parts := a.re.FindStringSubmatch(m)
switch parts[1] {
// FIXME
// %c and %C should probably return the logger name, not the package
// name, since that's how the logger is created in the first place!
case "c":
return caller.pkg
case "C":
return caller.pkg
case "d":
// FIXME specifier, e.g. %d{HH:mm:ss,SSS}
return time.Now().Format(DefaultTimeLayout)
case "F":
return caller.file
case "l":
return fmt.Sprintf("%s/%s:%d", caller.pkg, caller.filename, caller.line)
case "L":
return strconv.Itoa(caller.line)
case "m":
return fmt.Sprintf(message, args...)
case "n":
// FIXME platform-specific?
return "\n"
case "p":
return levels.LogLevelsToString[level]
case "r":
return strconv.FormatInt((r-a.created)/100000, 10)
case "x":
return "" // NDC
case "X":
return "" // MDC (must specify key)
case "%":
return "%"
}
return m
})
return msg
}
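A usage sketch of the pattern layout (logger obtained via the log package in this repo); the pattern string and sample output are illustrative:
```go
l := log.Logger()
l.Appender().SetLayout(layout.Pattern("%d [%p] %c - %m%n"))
l.Info("started %s", "worker-1")
// prints something like:
// 2017-04-17 00:13:30.000000000 +0100 BST [INFO] myapp - started worker-1
```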

View file

@ -1,33 +0,0 @@
package levels
type LogLevel int
const (
FATAL LogLevel = iota
ERROR
INFO
WARN
DEBUG
TRACE
INHERIT
)
var StringToLogLevels = map[string]LogLevel{
"TRACE": TRACE,
"DEBUG": DEBUG,
"WARN": WARN,
"INFO": INFO,
"ERROR": ERROR,
"FATAL": FATAL,
"INHERIT": INHERIT,
}
var LogLevelsToString = map[LogLevel]string{
TRACE: "TRACE",
DEBUG: "DEBUG",
WARN: "WARN",
INFO: "INFO",
ERROR: "ERROR",
FATAL: "FATAL",
INHERIT: "INHERIT",
}

View file

@ -1,54 +0,0 @@
package log
import (
"github.com/ian-kent/go-log/levels"
"github.com/ian-kent/go-log/logger"
"strings"
)
var global logger.Logger
// Converts a string level (e.g. DEBUG) to a LogLevel
func Stol(level string) levels.LogLevel {
return levels.StringToLogLevels[strings.ToUpper(level)]
}
// Returns a Logger instance
//
// If no arguments are given, the global/root logger
// instance will be returned.
//
// If at least one argument is given, the logger instance
// for that namespace will be returned.
func Logger(args ...string) logger.Logger {
var name string
if len(args) > 0 {
name = args[0]
} else {
name = ""
}
if global == nil {
global = logger.New("")
global.SetLevel(levels.DEBUG)
}
l := global.GetLogger(name)
return l
}
func Log(level levels.LogLevel, params ...interface{}) {
Logger().Log(level, params...)
}
func Level(level levels.LogLevel) { Logger().SetLevel(level) }
func Debug(params ...interface{}) { Log(levels.DEBUG, params...) }
func Info(params ...interface{}) { Log(levels.INFO, params...) }
func Warn(params ...interface{}) { Log(levels.WARN, params...) }
func Error(params ...interface{}) { Log(levels.ERROR, params...) }
func Trace(params ...interface{}) { Log(levels.TRACE, params...) }
func Fatal(params ...interface{}) { Log(levels.FATAL, params...) }
func Printf(params ...interface{}) { Log(levels.INFO, params...) }
func Println(params ...interface{}) { Log(levels.INFO, params...) }
func Fatalf(params ...interface{}) { Log(levels.FATAL, params...) }
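A short usage sketch of the package-level helpers and namespaced loggers ("app.http" is an illustrative name):
```go
package main

import (
	"github.com/ian-kent/go-log/log"
)

func main() {
	// Root logger, created on first use at DEBUG level.
	log.Info("listening on %s", ":8080")

	// Namespaced child logger; level and appender are inherited from the root.
	l := log.Logger("app.http")
	l.Debug("request from %s", "127.0.0.1")
}
```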

View file

@ -1,221 +0,0 @@
package logger
import (
"fmt"
"os"
"strings"
"github.com/ian-kent/go-log/appenders"
"github.com/ian-kent/go-log/layout"
"github.com/ian-kent/go-log/levels"
)
// Logger represents a logger
type Logger interface {
Level() levels.LogLevel
Name() string
FullName() string
Enabled() map[levels.LogLevel]bool
Appender() Appender
Children() []Logger
Parent() Logger
GetLogger(string) Logger
SetLevel(levels.LogLevel)
Log(levels.LogLevel, ...interface{})
SetAppender(appender Appender)
Debug(params ...interface{})
Info(params ...interface{})
Warn(params ...interface{})
Error(params ...interface{})
Trace(params ...interface{})
Printf(params ...interface{})
Println(params ...interface{})
Fatal(params ...interface{})
Fatalf(params ...interface{})
}
type logger struct {
level levels.LogLevel
name string
enabled map[levels.LogLevel]bool
appender Appender
children []Logger
parent Logger
ExitOnFatal bool
}
// Appender represents a log appender
type Appender interface {
Write(level levels.LogLevel, message string, args ...interface{})
SetLayout(layout layout.Layout)
Layout() layout.Layout
}
// New returns a new Logger
func New(name string) Logger {
l := Logger(&logger{
level: levels.DEBUG,
name: name,
enabled: make(map[levels.LogLevel]bool),
appender: appenders.Console(),
children: make([]Logger, 0),
parent: nil,
ExitOnFatal: true,
})
l.SetLevel(levels.DEBUG)
return l
}
func unwrap(args ...interface{}) []interface{} {
head := args[0]
switch head.(type) {
case func() (string, []interface{}):
msg, args := head.(func() (string, []interface{}))()
args = unwrap(args...)
return append([]interface{}{msg}, args...)
case func() []interface{}:
args = unwrap(head.(func() []interface{})()...)
case func(...interface{}) []interface{}:
args = unwrap(head.(func(...interface{}) []interface{})(args[1:]...)...)
}
return args
}
func (l *logger) New(name string) Logger {
lg := Logger(&logger{
level: levels.INHERIT,
name: name,
enabled: make(map[levels.LogLevel]bool),
appender: nil,
children: make([]Logger, 0),
parent: l,
})
l.children = append(l.children, lg)
return lg
}
func (l *logger) GetLogger(name string) Logger {
bits := strings.Split(name, ".")
if l.name == bits[0] {
if len(bits) == 1 {
return l
}
child := bits[1]
n := strings.Join(bits[1:], ".")
for _, c := range l.children {
if c.Name() == child {
return c.GetLogger(n)
}
}
lg := l.New(child)
return lg.GetLogger(n)
}
lg := l.New(bits[0])
return lg.GetLogger(name)
}
type stringer interface {
String() string
}
func (l *logger) write(level levels.LogLevel, params ...interface{}) {
a := l.Appender()
if a != nil {
if s, ok := params[0].(string); ok {
a.Write(level, s, params[1:]...)
} else if s, ok := params[0].(stringer); ok {
a.Write(level, s.String(), params[1:]...)
} else {
a.Write(level, fmt.Sprintf("%s", params[0]), params[1:]...)
}
}
}
func (l *logger) Appender() Appender {
if a := l.appender; a != nil {
return a
}
if l.parent != nil {
if a := l.parent.Appender(); a != nil {
return a
}
}
return nil
}
func (l *logger) Log(level levels.LogLevel, params ...interface{}) {
if !l.Enabled()[level] {
return
}
l.write(level, unwrap(params...)...)
if l.ExitOnFatal && level == levels.FATAL {
os.Exit(1)
}
}
func (l *logger) Level() levels.LogLevel {
if l.level == levels.INHERIT {
return l.parent.Level()
}
return l.level
}
func (l *logger) Enabled() map[levels.LogLevel]bool {
if l.level == levels.INHERIT {
return l.parent.Enabled()
}
return l.enabled
}
func (l *logger) Name() string {
return l.name
}
func (l *logger) FullName() string {
n := l.name
if l.parent != nil {
p := l.parent.FullName()
if len(p) > 0 {
n = l.parent.FullName() + "." + n
}
}
return n
}
func (l *logger) Children() []Logger {
return l.children
}
func (l *logger) Parent() Logger {
return l.parent
}
func (l *logger) SetLevel(level levels.LogLevel) {
l.level = level
for k := range levels.LogLevelsToString {
if k <= level {
l.enabled[k] = true
} else {
l.enabled[k] = false
}
}
}
func (l *logger) SetAppender(appender Appender) {
l.appender = appender
}
func (l *logger) Debug(params ...interface{}) { l.Log(levels.DEBUG, params...) }
func (l *logger) Info(params ...interface{}) { l.Log(levels.INFO, params...) }
func (l *logger) Warn(params ...interface{}) { l.Log(levels.WARN, params...) }
func (l *logger) Error(params ...interface{}) { l.Log(levels.ERROR, params...) }
func (l *logger) Trace(params ...interface{}) { l.Log(levels.TRACE, params...) }
func (l *logger) Printf(params ...interface{}) { l.Log(levels.INFO, params...) }
func (l *logger) Println(params ...interface{}) { l.Log(levels.INFO, params...) }
func (l *logger) Fatal(params ...interface{}) { l.Log(levels.FATAL, params...) }
func (l *logger) Fatalf(params ...interface{}) { l.Log(levels.FATAL, params...) }
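Because unwrap above also accepts closures, expensive message construction can be deferred until the level is actually enabled; a sketch (expensiveDump is a hypothetical helper):
```go
l := log.Logger("app")
l.Debug(func() (string, []interface{}) {
	// Only evaluated when DEBUG is enabled for this logger.
	return "state: %s", []interface{}{expensiveDump()}
})
```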

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,12 +0,0 @@
Goose - Go Server-Sent Events [![GoDoc](https://godoc.org/github.com/ian-kent/goose?status.svg)](https://godoc.org/github.com/ian-kent/goose)
=============================
Goose implements Server-Sent Events in Go.
See [this example](example/main.go).
### Licence
Copyright © 2014, Ian Kent (http://www.iankent.eu).
Released under MIT license, see [LICENSE](LICENSE.md) for details.

View file

@ -1,122 +0,0 @@
package goose
import (
"bufio"
"errors"
"fmt"
"net"
"net/http"
"strings"
"sync"
)
var (
// ErrUnableToHijackRequest is returned by AddReceiver if the type
// conversion to http.Hijacker is unsuccessful
ErrUnableToHijackRequest = errors.New("Unable to hijack request")
)
// EventStream represents a collection of receivers
type EventStream struct {
mutex *sync.Mutex
receivers map[net.Conn]*EventReceiver
}
// NewEventStream creates a new event stream
func NewEventStream() *EventStream {
return &EventStream{
mutex: new(sync.Mutex),
receivers: make(map[net.Conn]*EventReceiver),
}
}
// EventReceiver represents a hijacked HTTP connection
type EventReceiver struct {
stream *EventStream
conn net.Conn
bufrw *bufio.ReadWriter
}
// Notify sends the event to all event stream receivers
func (es *EventStream) Notify(event string, bytes []byte) {
// TODO reader?
lines := strings.Split(string(bytes), "\n")
data := ""
for _, l := range lines {
data += event + ": " + l + "\n"
}
sz := len(data) + 1
size := fmt.Sprintf("%X", sz)
for _, er := range es.receivers {
go er.send(size, data)
}
}
func (er *EventReceiver) send(size, data string) {
_, err := er.write([]byte(size + "\r\n"))
if err != nil {
return
}
lines := strings.Split(data, "\n")
for _, ln := range lines {
_, err = er.write([]byte(ln + "\n"))
if err != nil {
return
}
}
er.write([]byte("\r\n"))
}
func (er *EventReceiver) write(bytes []byte) (int, error) {
n, err := er.bufrw.Write(bytes)
if err != nil {
er.stream.mutex.Lock()
delete(er.stream.receivers, er.conn)
er.stream.mutex.Unlock()
er.conn.Close()
return n, err
}
err = er.bufrw.Flush()
if err != nil {
er.stream.mutex.Lock()
delete(er.stream.receivers, er.conn)
er.stream.mutex.Unlock()
er.conn.Close()
}
return n, err
}
// AddReceiver hijacks a http.ResponseWriter and attaches it to the event stream
func (es *EventStream) AddReceiver(w http.ResponseWriter) (*EventReceiver, error) {
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
w.WriteHeader(200)
hj, ok := w.(http.Hijacker)
if !ok {
return nil, ErrUnableToHijackRequest
}
hjConn, hjBufrw, err := hj.Hijack()
if err != nil {
return nil, err
}
rec := &EventReceiver{es, hjConn, hjBufrw}
es.mutex.Lock()
es.receivers[hjConn] = rec
es.mutex.Unlock()
return rec, nil
}
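A minimal SSE endpoint sketch (not part of the vendored file); the route, port, and tick interval are illustrative:
```go
package main

import (
	"net/http"
	"time"

	"github.com/ian-kent/goose"
)

var stream = goose.NewEventStream()

func events(w http.ResponseWriter, r *http.Request) {
	// Hijacks the connection and keeps it open for server-sent events.
	if _, err := stream.AddReceiver(w); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	http.HandleFunc("/events", events)
	go func() {
		for range time.Tick(time.Second) {
			// "data" becomes the SSE field name: "data: tick\n".
			stream.Notify("data", []byte("tick"))
		}
	}()
	http.ListenAndServe(":8080", nil)
}
```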

View file

@ -1,41 +0,0 @@
linkio [![GoDoc](https://godoc.org/github.com/ian-kent/linkio?status.svg)](https://godoc.org/github.com/ian-kent/linkio) [![Build Status](https://travis-ci.org/ian-kent/linkio.svg?branch=master)](https://travis-ci.org/ian-kent/linkio)
======
linkio provides an io.Reader and io.Writer that simulate a network connection of a certain speed, e.g. to simulate a mobile connection.
### Quick start
You can use `linkio` to wrap existing io.Reader and io.Writer interfaces:
```go
// Create a new link at 512kbps
link := linkio.NewLink(512 * linkio.KilobitPerSecond)
// Open a connection
conn, err := net.Dial("tcp", "google.com:80")
if err != nil {
// handle error
}
// Create a link reader/writer
linkReader := link.NewLinkReader(io.Reader(conn))
linkWriter := link.NewLinkWriter(io.Writer(conn))
// Use them as you would normally...
fmt.Fprintf(linkWriter, "GET / HTTP/1.0\r\n\r\n")
status, err := bufio.NewReader(linkReader).ReadString('\n')
```
### LICENSE
This code is originally a fork of [code.google.com/p/jra-go/linkio](https://code.google.com/p/jra-go/source/browse/#hg%2Flinkio).
The source contained this license text:
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
There is no LICENSE file, but it [may be referring to this](http://opensource.org/licenses/BSD-3-Clause).
Any modifications since the initial commit are Copyright © 2014, Ian Kent (http://iankent.uk), and are released under the terms of the [MIT License](http://opensource.org/licenses/MIT).

View file

@ -1,169 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package linkio provides an io.Reader and io.Writer that
// simulate a network connection of a certain speed.
package linkio
import (
"io"
"time"
)
// Throughput represents the link speed as an int64 bits per second
// count. The representation limits the largest representable throughput
// to approximately 9223 petabits per second.
type Throughput int64
// Common throughputs.
//
// To count the number of units in a Throughput, divide:
// kilobit := linkio.KilobitPerSecond
// fmt.Print(int64(kilobit/linkio.BitPerSecond)) // prints 1024
//
// To convert an integer number of units to a Throughput, multiply:
// megabits := 10
// fmt.Print(linkio.Throughput(megabits)*linkio.MegabitPerSecond) // prints 10485760
//
const (
BitPerSecond Throughput = 1
BytePerSecond = 8 * BitPerSecond
KilobitPerSecond = 1024 * BitPerSecond
KilobytePerSecond = 1024 * BytePerSecond
MegabitPerSecond = 1024 * KilobitPerSecond
MegabytePerSecond = 1024 * KilobytePerSecond
GigabitPerSecond = 1024 * MegabitPerSecond
GigabytePerSecond = 1024 * MegabytePerSecond
)
// A LinkReader wraps an io.Reader, simulating reading from a
// shared access link with a fixed maximum speed.
type LinkReader struct {
r io.Reader
link *Link
}
// A LinkWriter wraps an io.Writer, simulating writing to a
// shared access link with a fixed maximum speed.
type LinkWriter struct {
w io.Writer
link *Link
}
// A Link serializes requests to sleep, simulating the way data travels
// across a link which is running at a certain kbps (kilo = 1024).
// Multiple LinkReaders can share a link (simulating multiple apps
// sharing a link). The sharing behavior is approximately fair, as implemented
// by Go when scheduling reads from a contested blocking channel.
type Link struct {
in chan linkRequest
out chan linkRequest
speed int64 // nanosec per bit
}
// A linkRequest asks the link to simulate sending that much data
// and return a true on the channel when it has accomplished the request.
type linkRequest struct {
bytes int
done chan bool
}
// NewLinkReader returns a LinkReader that returns bytes from r,
// simulating that they arrived from a shared link.
func (link *Link) NewLinkReader(r io.Reader) (s *LinkReader) {
s = &LinkReader{r: r, link: link}
return
}
// NewLinkWriter returns a LinkWriter that writes bytes to w,
// simulating that they are sent across a shared link.
func (link *Link) NewLinkWriter(w io.Writer) (s *LinkWriter) {
s = &LinkWriter{w: w, link: link}
return
}
// NewLink returns a new Link running at kbps.
func NewLink(throughput Throughput) (l *Link) {
// allow up to 100 outstanding requests
l = &Link{in: make(chan linkRequest, 100), out: make(chan linkRequest, 100)}
l.SetThroughput(throughput)
// This goroutine serializes the requests. It could calculate
// link utilization by comparing the time it spends waiting for
// linkRequests to arrive with the time it spends sleeping to simulate
// traffic flowing.
go func() {
for lr := range l.in {
// bits * nanosec/bit = nano to wait
delay := time.Duration(int64(lr.bytes*8) * l.speed)
time.Sleep(delay)
lr.done <- true
}
}()
go func() {
for lr := range l.out {
// bits * nanosec/bit = nano to wait
delay := time.Duration(int64(lr.bytes*8) * l.speed)
time.Sleep(delay)
lr.done <- true
}
}()
return
}
// SetThroughput sets the current link throughput
func (link *Link) SetThroughput(throughput Throughput) {
// link.speed is stored in ns/bit
link.speed = 1e9 / int64(throughput)
}
// why isn't this in package math? hmm.
func min(a, b int) int {
if a < b {
return a
}
return b
}
// Satisfies interface io.Reader.
func (l *LinkReader) Read(buf []byte) (n int, err error) {
// Read small chunks at a time, even if they ask for more,
// preventing one LinkReader from saturating the simulated link.
// 1500 is the MTU for Ethernet, i.e. a likely maximum packet
// size.
toRead := min(len(buf), 1500)
n, err = l.r.Read(buf[0:toRead])
if err != nil {
return 0, err
}
// send in the request to sleep to the Link and sleep
lr := linkRequest{bytes: n, done: make(chan bool)}
l.link.in <- lr
_ = <-lr.done
return
}
// Satisfies interface io.Writer.
func (l *LinkWriter) Write(buf []byte) (n int, err error) {
// Write small chunks at a time, even if they attempt more,
// preventing one LinkReader from saturating the simulated link.
// 1500 is the MTU for Ethernet, i.e. a likely maximum packet
// size.
toWrite := min(len(buf), 1500)
n, err = l.w.Write(buf[0:toWrite])
if err != nil {
return 0, err
}
// send in the request to sleep to the Link and sleep
lr := linkRequest{bytes: n, done: make(chan bool)}
l.link.in <- lr
_ = <-lr.done
return
}

18
vendor/github.com/jtolds/gls/LICENSE generated vendored
View file

@ -1,18 +0,0 @@
Copyright (c) 2013, Space Monkey, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,89 +0,0 @@
gls
===
Goroutine local storage
### IMPORTANT NOTE ###
It is my duty to point you to https://blog.golang.org/context, which is how
Google solves all of the problems you'd perhaps consider using this package
for at scale.
One downside to Google's approach is that *all* of your functions must have
a new first argument, but after clearing that hurdle everything else is much
better.
If you aren't interested in this warning, read on.
### Huhwaht? Why? ###
Every so often, a thread shows up on the
[golang-nuts](https://groups.google.com/d/forum/golang-nuts) asking for some
form of goroutine-local-storage, or some kind of goroutine id, or some kind of
context. There are a few valid use cases for goroutine-local-storage, one of
the most prominent being log line context. One poster was interested in being
able to log an HTTP request context id in every log line in the same goroutine
as the incoming HTTP request, without having to change every library and
function call he was interested in logging.
This would be pretty useful. Provided that you could get some kind of
goroutine-local-storage, you could call
[log.SetOutput](http://golang.org/pkg/log/#SetOutput) with your own logging
writer that checks goroutine-local-storage for some context information and
adds that context to your log lines.
But alas, Andrew Gerrand's typically diplomatic answer to the question of
goroutine-local variables was:
> We wouldn't even be having this discussion if thread local storage wasn't
> useful. But every feature comes at a cost, and in my opinion the cost of
> threadlocals far outweighs their benefits. They're just not a good fit for
> Go.
So, yeah, that makes sense. That's a pretty good reason for why the language
won't support a specific and (relatively) unuseful feature that requires some
runtime changes, just for the sake of a little bit of log improvement.
But does Go require runtime changes?
### How it works ###
Go has pretty fantastic introspective and reflective features, but one thing Go
doesn't give you is any kind of access to the stack pointer, or frame pointer,
or goroutine id, or anything contextual about your current stack. It gives you
access to your list of callers, but only along with program counters, which are
fixed at compile time.
But it does give you the stack.
So, we define 16 special functions and embed base-16 tags into the stack using
the call order of those 16 functions. Then, we can read our tags back out of
the stack looking at the callers list.
We then use these tags as an index into a traditional map for implementing
this library.
### What are people saying? ###
"Wow, that's horrifying."
"This is the most terrible thing I have seen in a very long time."
"Where is it getting a context from? Is this serializing all the requests?
What the heck is the client being bound to? What are these tags? Why does he
need callers? Oh god no. No no no."
### Docs ###
Please see the docs at http://godoc.org/github.com/jtolds/gls
### Related ###
If you're okay relying on the string format of the current runtime stacktrace
including a unique goroutine id (not guaranteed by the spec or anything, but
very unlikely to change within a Go release), you might be able to squeeze
out a bit more performance by using this similar library, inspired by some
code Brad Fitzpatrick wrote for debugging his HTTP/2 library:
https://github.com/tylerb/gls (in contrast, jtolds/gls doesn't require
any knowledge of the string format of the runtime stacktrace, which
probably adds unnecessary overhead).

View file

@ -1,153 +0,0 @@
// Package gls implements goroutine-local storage.
package gls
import (
"sync"
)
var (
mgrRegistry = make(map[*ContextManager]bool)
mgrRegistryMtx sync.RWMutex
)
// Values is simply a map of key types to value types. Used by SetValues to
// set multiple values at once.
type Values map[interface{}]interface{}
// ContextManager is the main entrypoint for interacting with
// Goroutine-local-storage. You can have multiple independent ContextManagers
// at any given time. ContextManagers are usually declared globally for a given
// class of context variables. You should use NewContextManager for
// construction.
type ContextManager struct {
mtx sync.Mutex
values map[uint]Values
}
// NewContextManager returns a brand new ContextManager. It also registers the
// new ContextManager in the ContextManager registry which is used by the Go
// method. ContextManagers are typically defined globally at package scope.
func NewContextManager() *ContextManager {
mgr := &ContextManager{values: make(map[uint]Values)}
mgrRegistryMtx.Lock()
defer mgrRegistryMtx.Unlock()
mgrRegistry[mgr] = true
return mgr
}
// Unregister removes a ContextManager from the global registry, used by the
// Go method. Only intended for use when you're completely done with a
// ContextManager. Use of Unregister at all is rare.
func (m *ContextManager) Unregister() {
mgrRegistryMtx.Lock()
defer mgrRegistryMtx.Unlock()
delete(mgrRegistry, m)
}
// SetValues takes a collection of values and a function to call for those
// values to be set in. Anything further down the stack will have the set
// values available through GetValue. SetValues will add new values or replace
// existing values of the same key and will not mutate or change values for
// previous stack frames.
// SetValues is slow (it makes a copy of all current and new values for the new
// gls-context) in order to reduce the number of lookups GetValue requires.
func (m *ContextManager) SetValues(new_values Values, context_call func()) {
if len(new_values) == 0 {
context_call()
return
}
mutated_keys := make([]interface{}, 0, len(new_values))
mutated_vals := make(Values, len(new_values))
EnsureGoroutineId(func(gid uint) {
m.mtx.Lock()
state, found := m.values[gid]
if !found {
state = make(Values, len(new_values))
m.values[gid] = state
}
m.mtx.Unlock()
for key, new_val := range new_values {
mutated_keys = append(mutated_keys, key)
if old_val, ok := state[key]; ok {
mutated_vals[key] = old_val
}
state[key] = new_val
}
defer func() {
if !found {
m.mtx.Lock()
delete(m.values, gid)
m.mtx.Unlock()
return
}
for _, key := range mutated_keys {
if val, ok := mutated_vals[key]; ok {
state[key] = val
} else {
delete(state, key)
}
}
}()
context_call()
})
}
// GetValue will return a previously set value, provided that the value was set
// by SetValues somewhere higher up the stack. If the value is not found, ok
// will be false.
func (m *ContextManager) GetValue(key interface{}) (
value interface{}, ok bool) {
gid, ok := GetGoroutineId()
if !ok {
return nil, false
}
m.mtx.Lock()
state, found := m.values[gid]
m.mtx.Unlock()
if !found {
return nil, false
}
value, ok = state[key]
return value, ok
}
func (m *ContextManager) getValues() Values {
gid, ok := GetGoroutineId()
if !ok {
return nil
}
m.mtx.Lock()
state, _ := m.values[gid]
m.mtx.Unlock()
return state
}
// Go preserves ContextManager values and Goroutine-local-storage across new
// goroutine invocations. The Go method makes a copy of all existing values on
// all registered context managers and makes sure they are still set after
// kicking off the provided function in a new goroutine. If you don't use this
// Go method instead of the standard 'go' keyword, you will lose values in
// ContextManagers, as goroutines have brand new stacks.
func Go(cb func()) {
mgrRegistryMtx.RLock()
defer mgrRegistryMtx.RUnlock()
for mgr := range mgrRegistry {
values := mgr.getValues()
if len(values) > 0 {
cb = func(mgr *ContextManager, cb func()) func() {
return func() { mgr.SetValues(values, cb) }
}(mgr, cb)
}
}
go cb()
}
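A small usage sketch (GenSym and Values appear elsewhere in this package); the request-id key and value are illustrative:
```go
package main

import (
	"fmt"

	"github.com/jtolds/gls"
)

var (
	mgr       = gls.NewContextManager()
	requestID = gls.GenSym()
)

func handle() {
	if id, ok := mgr.GetValue(requestID); ok {
		fmt.Println("request id:", id)
	}
}

func main() {
	done := make(chan struct{})
	mgr.SetValues(gls.Values{requestID: 42}, func() {
		handle() // sees requestID == 42
		gls.Go(func() {
			handle() // gls.Go preserves the values in the new goroutine
			close(done)
		})
	})
	<-done
}
```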

View file

@ -1,21 +0,0 @@
package gls
import (
"sync"
)
var (
keyMtx sync.Mutex
keyCounter uint64
)
// ContextKey is a throwaway value you can use as a key to a ContextManager
type ContextKey struct{ id uint64 }
// GenSym will return a brand new, never-before-used ContextKey
func GenSym() ContextKey {
keyMtx.Lock()
defer keyMtx.Unlock()
keyCounter += 1
return ContextKey{id: keyCounter}
}

25
vendor/github.com/jtolds/gls/gid.go generated vendored
View file

@ -1,25 +0,0 @@
package gls
var (
stackTagPool = &idPool{}
)
// Will return this goroutine's identifier if set. If you always need a
// goroutine identifier, you should use EnsureGoroutineId which will make one
// if there isn't one already.
func GetGoroutineId() (gid uint, ok bool) {
return readStackTag()
}
// Will call cb with the current goroutine identifier. If one hasn't already
// been generated, one will be created and set first. The goroutine identifier
// might be invalid after cb returns.
func EnsureGoroutineId(cb func(gid uint)) {
if gid, ok := readStackTag(); ok {
cb(gid)
return
}
gid := stackTagPool.Acquire()
defer stackTagPool.Release(gid)
addStackTag(gid, func() { cb(gid) })
}

View file

@ -1,34 +0,0 @@
package gls
// though this could probably be better at keeping ids smaller, the goal of
// this class is to keep a registry of the smallest unique integer ids
// per-process possible
import (
"sync"
)
type idPool struct {
mtx sync.Mutex
released []uint
max_id uint
}
func (p *idPool) Acquire() (id uint) {
p.mtx.Lock()
defer p.mtx.Unlock()
if len(p.released) > 0 {
id = p.released[len(p.released)-1]
p.released = p.released[:len(p.released)-1]
return id
}
id = p.max_id
p.max_id++
return id
}
func (p *idPool) Release(id uint) {
p.mtx.Lock()
defer p.mtx.Unlock()
p.released = append(p.released, id)
}

View file

@ -1,108 +0,0 @@
package gls
// so, basically, we're going to encode integer tags in base-16 on the stack
const (
bitWidth = 4
stackBatchSize = 16
)
var (
pc_lookup = make(map[uintptr]int8, 17)
mark_lookup [16]func(uint, func())
)
func init() {
setEntries := func(f func(uint, func()), v int8) {
var ptr uintptr
f(0, func() {
ptr = findPtr()
})
pc_lookup[ptr] = v
if v >= 0 {
mark_lookup[v] = f
}
}
setEntries(github_com_jtolds_gls_markS, -0x1)
setEntries(github_com_jtolds_gls_mark0, 0x0)
setEntries(github_com_jtolds_gls_mark1, 0x1)
setEntries(github_com_jtolds_gls_mark2, 0x2)
setEntries(github_com_jtolds_gls_mark3, 0x3)
setEntries(github_com_jtolds_gls_mark4, 0x4)
setEntries(github_com_jtolds_gls_mark5, 0x5)
setEntries(github_com_jtolds_gls_mark6, 0x6)
setEntries(github_com_jtolds_gls_mark7, 0x7)
setEntries(github_com_jtolds_gls_mark8, 0x8)
setEntries(github_com_jtolds_gls_mark9, 0x9)
setEntries(github_com_jtolds_gls_markA, 0xa)
setEntries(github_com_jtolds_gls_markB, 0xb)
setEntries(github_com_jtolds_gls_markC, 0xc)
setEntries(github_com_jtolds_gls_markD, 0xd)
setEntries(github_com_jtolds_gls_markE, 0xe)
setEntries(github_com_jtolds_gls_markF, 0xf)
}
func addStackTag(tag uint, context_call func()) {
if context_call == nil {
return
}
github_com_jtolds_gls_markS(tag, context_call)
}
// these private methods are named this horrendous name so gopherjs support
// is easier. it shouldn't add any runtime cost in non-js builds.
func github_com_jtolds_gls_markS(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark0(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark1(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark2(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark3(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark4(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark5(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark6(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark7(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark8(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_mark9(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_markA(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_markB(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_markC(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_markD(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_markE(tag uint, cb func()) { _m(tag, cb) }
func github_com_jtolds_gls_markF(tag uint, cb func()) { _m(tag, cb) }
func _m(tag_remainder uint, cb func()) {
if tag_remainder == 0 {
cb()
} else {
mark_lookup[tag_remainder&0xf](tag_remainder>>bitWidth, cb)
}
}
func readStackTag() (tag uint, ok bool) {
var current_tag uint
offset := 0
for {
// the expectation with getStack is that it will either:
// * return everything when offset is 0 and ignore stackBatchSize,
// otherwise returning nothing when offset is not 0 (the gopherjs case)
// * or it will return at most stackBatchSize, respect offset, and
// shouldn't be called when it returns less than stackBatchSize
// (the runtime.Callers case).
batch := getStack(offset, stackBatchSize)
for _, pc := range batch {
val, ok := pc_lookup[pc]
if !ok {
continue
}
if val < 0 {
return current_tag, true
}
current_tag <<= bitWidth
current_tag += uint(val)
}
if len(batch) < stackBatchSize {
break
}
offset += len(batch)
}
return 0, false
}

View file

@@ -1,75 +0,0 @@
// +build js
package gls
// This file is used for GopherJS builds, which don't have normal runtime
// stack trace support
import (
"strconv"
"strings"
"github.com/gopherjs/gopherjs/js"
)
const (
jsFuncNamePrefix = "github_com_jtolds_gls_mark"
)
func jsMarkStack() (f []uintptr) {
lines := strings.Split(
js.Global.Get("Error").New().Get("stack").String(), "\n")
f = make([]uintptr, 0, len(lines))
for i, line := range lines {
line = strings.TrimSpace(line)
if line == "" {
continue
}
if i == 0 {
if line != "Error" {
panic("didn't understand js stack trace")
}
continue
}
fields := strings.Fields(line)
if len(fields) < 2 || fields[0] != "at" {
panic("didn't understand js stack trace")
}
pos := strings.Index(fields[1], jsFuncNamePrefix)
if pos < 0 {
continue
}
pos += len(jsFuncNamePrefix)
if pos >= len(fields[1]) {
panic("didn't understand js stack trace")
}
char := string(fields[1][pos])
switch char {
case "S":
f = append(f, uintptr(0))
default:
val, err := strconv.ParseUint(char, 16, 8)
if err != nil {
panic("didn't understand js stack trace")
}
f = append(f, uintptr(val)+1)
}
}
return f
}
func findPtr() uintptr {
funcs := jsMarkStack()
if len(funcs) == 0 {
panic("failed to find function pointer")
}
return funcs[0]
}
func getStack(offset, amount int) []uintptr {
if offset != 0 {
return nil
}
return jsMarkStack()
}

View file

@@ -1,23 +0,0 @@
// +build !js
package gls
// This file is used for standard Go builds, which have the expected runtime
// support
import (
"runtime"
)
func getStack(offset, amount int) []uintptr {
stack := make([]uintptr, amount)
return stack[:runtime.Callers(offset, stack)]
}
func findPtr() uintptr {
pc, _, _, ok := runtime.Caller(3)
if !ok {
panic("failed to find function pointer")
}
return pc
}

View file

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,10 +0,0 @@
MailHog data library [![GoDoc](https://godoc.org/github.com/mailhog/data?status.svg)](https://godoc.org/github.com/mailhog/data) [![Build Status](https://travis-ci.org/mailhog/data.svg?branch=master)](https://travis-ci.org/mailhog/data)
=========
`github.com/mailhog/data` implements the MailHog message data library: it parses raw SMTP transactions into structured messages, headers and MIME parts.
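For example (a minimal sketch using only the types shown in the source below; the addresses and hostname are made up):

```go
package main

import (
	"fmt"

	"github.com/mailhog/data"
)

func main() {
	// A raw SMTP transaction as captured by an SMTP server (hypothetical values).
	raw := &data.SMTPMessage{
		Helo: "client.example.com",
		From: "sender@example.com",
		To:   []string{"recipient@example.com"},
		Data: "Subject: Hello\r\n\r\nHello world\r\n",
	}

	// Parse into a structured Message; the hostname is used when generating
	// the Message-ID and Received headers.
	msg := raw.Parse("mailhog.example")

	fmt.Println(msg.ID)
	fmt.Println(msg.From.Mailbox + "@" + msg.From.Domain)
	fmt.Println(msg.Content.Headers["Subject"])
}
```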
### Licence
Copyright © 2014-2015, Ian Kent (http://iankent.uk)
Released under MIT license, see [LICENSE](LICENSE.md) for details.

View file

@@ -1,330 +0,0 @@
package data
import (
"bytes"
"crypto/rand"
"encoding/base64"
"io"
"log"
"mime"
"strings"
"time"
)
// LogHandler is called for each log message. If nil, log messages will
// be output using log.Printf instead.
var LogHandler func(message string, args ...interface{})
func logf(message string, args ...interface{}) {
if LogHandler != nil {
LogHandler(message, args...)
} else {
log.Printf(message, args...)
}
}
// MessageID represents the ID of an SMTP message including the hostname part
type MessageID string
// NewMessageID generates a new message ID
func NewMessageID(hostname string) (MessageID, error) {
size := 32
rb := make([]byte, size)
_, err := rand.Read(rb)
if err != nil {
return MessageID(""), err
}
rs := base64.URLEncoding.EncodeToString(rb)
return MessageID(rs + "@" + hostname), nil
}
// Messages represents an array of Messages
// - TODO is this even required?
type Messages []Message
// Message represents a parsed SMTP message
type Message struct {
ID MessageID
From *Path
To []*Path
Content *Content
Created time.Time
MIME *MIMEBody // FIXME refactor to use Content.MIME
Raw *SMTPMessage
}
// Path represents an SMTP forward-path or return-path
type Path struct {
Relays []string
Mailbox string
Domain string
Params string
}
// Content represents the body content of an SMTP message
type Content struct {
Headers map[string][]string
Body string
Size int
MIME *MIMEBody
}
// SMTPMessage represents a raw SMTP message
type SMTPMessage struct {
From string
To []string
Data string
Helo string
}
// MIMEBody represents a collection of MIME parts
type MIMEBody struct {
Parts []*Content
}
// Parse converts a raw SMTP message to a parsed MIME message
func (m *SMTPMessage) Parse(hostname string) *Message {
var arr []*Path
for _, path := range m.To {
arr = append(arr, PathFromString(path))
}
id, _ := NewMessageID(hostname)
msg := &Message{
ID: id,
From: PathFromString(m.From),
To: arr,
Content: ContentFromString(m.Data),
Created: time.Now(),
Raw: m,
}
if msg.Content.IsMIME() {
logf("Parsing MIME body")
msg.MIME = msg.Content.ParseMIMEBody()
}
// find headers
var hasMessageID bool
var receivedHeaderName string
var returnPathHeaderName string
for k := range msg.Content.Headers {
if strings.ToLower(k) == "message-id" {
hasMessageID = true
continue
}
if strings.ToLower(k) == "received" {
receivedHeaderName = k
continue
}
if strings.ToLower(k) == "return-path" {
returnPathHeaderName = k
continue
}
}
if !hasMessageID {
msg.Content.Headers["Message-ID"] = []string{string(id)}
}
if len(receivedHeaderName) > 0 {
msg.Content.Headers[receivedHeaderName] = append(msg.Content.Headers[receivedHeaderName], "from "+m.Helo+" by "+hostname+" (MailHog)\r\n id "+string(id)+"; "+time.Now().Format(time.RFC1123Z))
} else {
msg.Content.Headers["Received"] = []string{"from " + m.Helo + " by " + hostname + " (MailHog)\r\n id " + string(id) + "; " + time.Now().Format(time.RFC1123Z)}
}
if len(returnPathHeaderName) > 0 {
msg.Content.Headers[returnPathHeaderName] = append(msg.Content.Headers[returnPathHeaderName], "<"+m.From+">")
} else {
msg.Content.Headers["Return-Path"] = []string{"<" + m.From + ">"}
}
return msg
}
// Bytes returns an io.Reader containing the raw message data
func (m *SMTPMessage) Bytes() io.Reader {
var b = new(bytes.Buffer)
b.WriteString("HELO:<" + m.Helo + ">\r\n")
b.WriteString("FROM:<" + m.From + ">\r\n")
for _, t := range m.To {
b.WriteString("TO:<" + t + ">\r\n")
}
b.WriteString("\r\n")
b.WriteString(m.Data)
return b
}
// FromBytes returns a SMTPMessage from raw message bytes (as output by SMTPMessage.Bytes())
func FromBytes(b []byte) *SMTPMessage {
msg := &SMTPMessage{}
var headerDone bool
for _, l := range strings.Split(string(b), "\n") {
if !headerDone {
if strings.HasPrefix(l, "HELO:<") {
l = strings.TrimPrefix(l, "HELO:<")
l = strings.TrimSuffix(l, ">\r")
msg.Helo = l
continue
}
if strings.HasPrefix(l, "FROM:<") {
l = strings.TrimPrefix(l, "FROM:<")
l = strings.TrimSuffix(l, ">\r")
msg.From = l
continue
}
if strings.HasPrefix(l, "TO:<") {
l = strings.TrimPrefix(l, "TO:<")
l = strings.TrimSuffix(l, ">\r")
msg.To = append(msg.To, l)
continue
}
if strings.TrimSpace(l) == "" {
headerDone = true
continue
}
}
msg.Data += l + "\n"
}
return msg
}
// Bytes returns an io.Reader containing the raw message data
func (m *Message) Bytes() io.Reader {
var b = new(bytes.Buffer)
for k, vs := range m.Content.Headers {
for _, v := range vs {
b.WriteString(k + ": " + v + "\r\n")
}
}
b.WriteString("\r\n")
b.WriteString(m.Content.Body)
return b
}
// IsMIME detects a valid MIME header
func (content *Content) IsMIME() bool {
header, ok := content.Headers["Content-Type"]
if !ok {
return false
}
return strings.HasPrefix(header[0], "multipart/")
}
// ParseMIMEBody parses SMTP message content into multiple MIME parts
func (content *Content) ParseMIMEBody() *MIMEBody {
var parts []*Content
if hdr, ok := content.Headers["Content-Type"]; ok {
if len(hdr) > 0 {
boundary := extractBoundary(hdr[0])
var p []string
if len(boundary) > 0 {
p = strings.Split(content.Body, "--"+boundary)
logf("Got boundary: %s", boundary)
} else {
logf("Boundary not found: %s", hdr[0])
}
for _, s := range p {
if len(s) > 0 {
part := ContentFromString(strings.Trim(s, "\r\n"))
if part.IsMIME() {
logf("Parsing inner MIME body")
part.MIME = part.ParseMIMEBody()
}
parts = append(parts, part)
}
}
}
}
return &MIMEBody{
Parts: parts,
}
}
// PathFromString parses a forward-path or reverse-path into its parts
func PathFromString(path string) *Path {
var relays []string
email := path
if strings.Contains(path, ":") {
x := strings.SplitN(path, ":", 2)
r, e := x[0], x[1]
email = e
relays = strings.Split(r, ",")
}
mailbox, domain := "", ""
if strings.Contains(email, "@") {
x := strings.SplitN(email, "@", 2)
mailbox, domain = x[0], x[1]
} else {
mailbox = email
}
return &Path{
Relays: relays,
Mailbox: mailbox,
Domain: domain,
Params: "", // FIXME?
}
}
// ContentFromString parses SMTP content into separate headers and body
func ContentFromString(data string) *Content {
logf("Parsing Content from string: '%s'", data)
x := strings.SplitN(data, "\r\n\r\n", 2)
h := make(map[string][]string, 0)
// FIXME this fails if the message content has no headers - specifically,
// if it doesn't contain \r\n\r\n
if len(x) == 2 {
headers, body := x[0], x[1]
hdrs := strings.Split(headers, "\r\n")
var lastHdr = ""
for _, hdr := range hdrs {
if lastHdr != "" && (strings.HasPrefix(hdr, " ") || strings.HasPrefix(hdr, "\t")) {
h[lastHdr][len(h[lastHdr])-1] = h[lastHdr][len(h[lastHdr])-1] + hdr
} else if strings.Contains(hdr, ": ") {
y := strings.SplitN(hdr, ": ", 2)
key, value := y[0], y[1]
// TODO multiple header fields
h[key] = []string{value}
lastHdr = key
} else if len(hdr) > 0 {
logf("Found invalid header: '%s'", hdr)
}
}
return &Content{
Size: len(data),
Headers: h,
Body: body,
}
}
return &Content{
Size: len(data),
Headers: h,
Body: x[0],
}
}
// extractBoundary extracts the boundary string from contentType.
// It returns an empty string if no valid boundary is found
func extractBoundary(contentType string) string {
_, params, err := mime.ParseMediaType(contentType)
if err == nil {
return params["boundary"]
}
return ""
}

View file

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,10 +0,0 @@
MailHog HTTP utilities [![GoDoc](https://godoc.org/github.com/mailhog/http?status.svg)](https://godoc.org/github.com/mailhog/http) [![Build Status](https://travis-ci.org/mailhog/http.svg?branch=master)](https://travis-ci.org/mailhog/http)
=========
`github.com/mailhog/http` provides HTTP utilities used by MailHog-UI and MailHog-Server.
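As a rough sketch of how these utilities fit together (the import alias and the auth file name are assumptions; each line of the file is `username:bcrypt-hash`, as parsed by `AuthFile` below):

```go
package main

import (
	"net/http"

	mhhttp "github.com/mailhog/http"
)

func main() {
	// Loading the file sets the package-level Authorised function.
	// (The file name here is made up.)
	mhhttp.AuthFile("users.auth")

	// BasicAuthHandler only enforces credentials when Authorised is non-nil.
	protected := mhhttp.BasicAuthHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authorised\n"))
	}))

	http.ListenAndServe("127.0.0.1:8025", protected)
}
```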
### Licence
Copyright © 2014-2015, Ian Kent (http://iankent.uk)
Released under MIT license, see [LICENSE](LICENSE.md) for details.

View file

@@ -1,112 +0,0 @@
package http
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/gorilla/pat"
"github.com/ian-kent/go-log/log"
"golang.org/x/crypto/bcrypt"
)
// Authorised should be given a function to enable HTTP Basic Authentication
var Authorised func(string, string) bool
var users map[string]string
// AuthFile sets Authorised to a function which validates against file
func AuthFile(file string) {
users = make(map[string]string)
b, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("[HTTP] Error reading auth-file: %s", err)
// FIXME - go-log
os.Exit(1)
}
buf := bytes.NewBuffer(b)
for {
l, err := buf.ReadString('\n')
l = strings.TrimSpace(l)
if len(l) > 0 {
p := strings.SplitN(l, ":", 2)
if len(p) < 2 {
log.Fatalf("[HTTP] Error reading auth-file, invalid line: %s", l)
// FIXME - go-log
os.Exit(1)
}
users[p[0]] = p[1]
}
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("[HTTP] Error reading auth-file: %s", err)
// FIXME - go-log
os.Exit(1)
}
}
log.Printf("[HTTP] Loaded %d users from %s", len(users), file)
Authorised = func(u, pw string) bool {
hpw, ok := users[u]
if !ok {
return false
}
err := bcrypt.CompareHashAndPassword([]byte(hpw), []byte(pw))
if err != nil {
return false
}
return true
}
}
// BasicAuthHandler is middleware to check HTTP Basic Authentication
// if an authorisation function is defined.
func BasicAuthHandler(h http.Handler) http.Handler {
f := func(w http.ResponseWriter, req *http.Request) {
if Authorised == nil {
h.ServeHTTP(w, req)
return
}
u, pw, ok := req.BasicAuth()
if !ok || !Authorised(u, pw) {
w.Header().Set("WWW-Authenticate", "Basic")
w.WriteHeader(401)
return
}
h.ServeHTTP(w, req)
}
return http.HandlerFunc(f)
}
// Listen binds to httpBindAddr
func Listen(httpBindAddr string, Asset func(string) ([]byte, error), exitCh chan int, registerCallback func(http.Handler)) {
log.Info("[HTTP] Binding to address: %s", httpBindAddr)
pat := pat.New()
registerCallback(pat)
//compress := handlers.CompressHandler(pat)
auth := BasicAuthHandler(pat) //compress)
err := http.ListenAndServe(httpBindAddr, auth)
if err != nil {
log.Fatalf("[HTTP] Error binding to address %s: %s", httpBindAddr, err)
}
}

View file

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 - 2016 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,82 +0,0 @@
package cmd
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/mail"
"net/smtp"
"os"
"os/user"
)
import flag "github.com/spf13/pflag"
// Go runs the MailHog sendmail replacement.
func Go() {
host, err := os.Hostname()
if err != nil {
host = "localhost"
}
username := "nobody"
user, err := user.Current()
if err == nil && user != nil && len(user.Username) > 0 {
username = user.Username
}
fromAddr := username + "@" + host
smtpAddr := "localhost:1025"
var recip []string
// defaults from envars if provided
if len(os.Getenv("MH_SENDMAIL_SMTP_ADDR")) > 0 {
smtpAddr = os.Getenv("MH_SENDMAIL_SMTP_ADDR")
}
if len(os.Getenv("MH_SENDMAIL_FROM")) > 0 {
fromAddr = os.Getenv("MH_SENDMAIL_FROM")
}
var verbose bool
// override defaults from cli flags
flag.StringVar(&smtpAddr, "smtp-addr", smtpAddr, "SMTP server address")
flag.StringVarP(&fromAddr, "from", "f", fromAddr, "SMTP sender")
flag.BoolP("long-i", "i", true, "Ignored. This flag exists for sendmail compatibility.")
flag.BoolP("long-t", "t", true, "Ignored. This flag exists for sendmail compatibility.")
flag.BoolVarP(&verbose, "verbose", "v", false, "Verbose mode (sends debug output to stderr)")
flag.Parse()
// allow recipient to be passed as an argument
recip = flag.Args()
if verbose {
fmt.Fprintln(os.Stderr, smtpAddr, fromAddr)
}
body, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintln(os.Stderr, "error reading stdin")
os.Exit(11)
}
msg, err := mail.ReadMessage(bytes.NewReader(body))
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing message body")
os.Exit(11)
}
if len(recip) == 0 {
// We only need to parse the message to get a recipient if none were
// provided on the command line.
recip = append(recip, msg.Header.Get("To"))
}
err = smtp.SendMail(smtpAddr, nil, fromAddr, recip, body)
if err != nil {
fmt.Fprintln(os.Stderr, "error sending mail")
log.Fatal(err)
}
}

View file

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,100 +0,0 @@
MailHog SMTP Protocol [![GoDoc](https://godoc.org/github.com/mailhog/smtp?status.svg)](https://godoc.org/github.com/mailhog/smtp) [![Build Status](https://travis-ci.org/mailhog/smtp.svg?branch=master)](https://travis-ci.org/mailhog/smtp)
=========
`github.com/mailhog/smtp` implements an SMTP server state machine.
It attempts to encapsulate as much of the SMTP protocol (plus its extensions) as possible
without compromising configurability or requiring specific backend implementations.
* ESMTP server implementing [RFC5321](http://tools.ietf.org/html/rfc5321)
* Support for:
* AUTH [RFC4954](http://tools.ietf.org/html/rfc4954)
* PIPELINING [RFC2920](http://tools.ietf.org/html/rfc2920)
* STARTTLS [RFC3207](http://tools.ietf.org/html/rfc3207)
```go
proto := NewProtocol()
reply := proto.Start()
reply = proto.ProcessCommand("EHLO localhost")
// ...
```
See [MailHog-Server](https://github.com/mailhog/MailHog-Server) and [MailHog-MTA](https://github.com/mailhog/MailHog-MTA) for example implementations.
### Commands and replies
Interaction with the state machine is via:
* the `Parse` function
* the `ProcessCommand` and `ProcessData` functions
You can mix the use of all three functions as necessary.
#### Parse
`Parse` should be used on a raw text stream. It looks for an end of line (`\r\n`), and if found, processes a single command. Any unprocessed data is returned.
If any unprocessed data is returned, `Parse` should be
called again to process the next command.
```go
text := "EHLO localhost\r\nMAIL FROM:<test>\r\nDATA\r\nTest\r\n.\r\n"
var reply *smtp.Reply
for {
text, reply = proto.Parse(text)
if len(text) == 0 {
break
}
}
```
#### ProcessCommand and ProcessData
`ProcessCommand` should be used for an already parsed command (i.e., a complete
SMTP "line" excluding the line ending).
`ProcessData` should be used if the protocol is in `DATA` state.
```go
reply = proto.ProcessCommand("EHLO localhost")
reply = proto.ProcessCommand("MAIL FROM:<test>")
reply = proto.ProcessCommand("DATA")
reply = proto.ProcessData("Test\r\n.\r\n")
```
### Hooks
The state machine provides hooks to manipulate its behaviour (a short example follows the table below).
See [![GoDoc](https://godoc.org/github.com/mailhog/smtp?status.svg)](https://godoc.org/github.com/mailhog/smtp) for more information.
| Hook | Description
| ---------------------------------- | -----------
| LogHandler | Called for every log message
| MessageReceivedHandler | Called for each message received
| ValidateSenderHandler | Called after MAIL FROM
| ValidateRecipientHandler | Called after RCPT TO
| ValidateAuthenticationHandler | Called after AUTH
| SMTPVerbFilter | Called for every SMTP command processed
| TLSHandler | Callback mashup called after STARTTLS
| GetAuthenticationMechanismsHandler | Called for each EHLO command
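For example, a server embedding this state machine might wire up hooks like this (a sketch in the same fragment style as the snippets above; it assumes the `data`, `fmt` and `strings` packages are imported, and the example.com policy is made up):

```go
proto := NewProtocol()

// Accept messages and hand back an ID; a real backend would store them.
var received []*data.SMTPMessage
proto.MessageReceivedHandler = func(msg *data.SMTPMessage) (string, error) {
	received = append(received, msg)
	return fmt.Sprintf("msg-%d", len(received)), nil
}

// Reject recipients outside example.com; the state machine turns the
// false return into an error reply.
proto.ValidateRecipientHandler = func(to string) bool {
	return strings.HasSuffix(to, "@example.com")
}
```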
### Behaviour flags
The state machine also exports variables to control its behaviour (a brief example follows the table):
See [![GoDoc](https://godoc.org/github.com/mailhog/smtp?status.svg)](https://godoc.org/github.com/mailhog/smtp) for more information.
| Variable | Description
| ---------------------- | -----------
| RejectBrokenRCPTSyntax | Reject non-conforming RCPT syntax
| RejectBrokenMAILSyntax | Reject non-conforming MAIL syntax
| RequireTLS | Require STARTTLS before other commands
| MaximumRecipients | Maximum recipients per message
| MaximumLineLength | Maximum length of SMTP line
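These are plain fields on `Protocol`, set before the conversation starts; a brief sketch (the limits here are arbitrary):

```go
proto := NewProtocol()
proto.RequireTLS = true        // other verbs get a 530 reply until STARTTLS completes
proto.MaximumRecipients = 50   // further RCPT commands get a 552 reply
proto.MaximumLineLength = 1000 // longer lines are rejected with a 500 reply
```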
### Licence
Copyright © 2014-2015, Ian Kent (http://iankent.uk)
Released under MIT license, see [LICENSE](LICENSE.md) for details.

View file

@@ -1,504 +0,0 @@
package smtp
// http://www.rfc-editor.org/rfc/rfc5321.txt
import (
"encoding/base64"
"errors"
"log"
"regexp"
"strings"
"github.com/mailhog/data"
)
// Command is a struct representing an SMTP command (verb + arguments)
type Command struct {
verb string
args string
orig string
}
// ParseCommand returns a Command from the line string
func ParseCommand(line string) *Command {
words := strings.Split(line, " ")
command := strings.ToUpper(words[0])
args := strings.Join(words[1:len(words)], " ")
return &Command{
verb: command,
args: args,
orig: line,
}
}
// Protocol is a state machine representing an SMTP session
type Protocol struct {
lastCommand *Command
TLSPending bool
TLSUpgraded bool
State State
Message *data.SMTPMessage
Hostname string
Ident string
MaximumLineLength int
MaximumRecipients int
// LogHandler is called for each log message. If nil, log messages will
// be output using log.Printf instead.
LogHandler func(message string, args ...interface{})
// MessageReceivedHandler is called for each message accepted by the
// SMTP protocol. It must return a MessageID or error. If nil, messages
// will be rejected with an error.
MessageReceivedHandler func(*data.SMTPMessage) (string, error)
// ValidateSenderHandler should return true if the sender is valid,
// otherwise false. If nil, all senders will be accepted.
ValidateSenderHandler func(from string) bool
// ValidateRecipientHandler should return true if the recipient is valid,
// otherwise false. If nil, all recipients will be accepted.
ValidateRecipientHandler func(to string) bool
// ValidateAuthenticationHandler should return true if the authentication
// parameters are valid, otherwise false. If nil, all authentication
// attempts will be accepted.
ValidateAuthenticationHandler func(mechanism string, args ...string) (errorReply *Reply, ok bool)
// SMTPVerbFilter is called after each command is parsed, but before
// any code is executed. This provides an opportunity to reject unwanted verbs,
// e.g. to require AUTH before MAIL
SMTPVerbFilter func(verb string, args ...string) (errorReply *Reply)
// TLSHandler is called when a STARTTLS command is received.
//
// It should acknowledge the TLS request and set ok to true.
// It should also return a callback which will be invoked after the reply is
// sent. E.g., a TCP connection can only perform the upgrade after sending the reply
//
// Once the upgrade is complete, invoke the done function (e.g., from the returned callback)
//
// If TLS upgrade isn't possible, return an errorReply and set ok to false.
TLSHandler func(done func(ok bool)) (errorReply *Reply, callback func(), ok bool)
// GetAuthenticationMechanismsHandler should return an array of strings
// listing accepted authentication mechanisms
GetAuthenticationMechanismsHandler func() []string
// RejectBrokenRCPTSyntax controls whether the protocol accepts technically
// invalid syntax for the RCPT command. Set to true, the RCPT syntax requires
// no space between `TO:` and the opening `<`
RejectBrokenRCPTSyntax bool
// RejectBrokenMAILSyntax controls whether the protocol accepts technically
// invalid syntax for the MAIL command. Set to true, the MAIL syntax requires
// no space between `FROM:` and the opening `<`
RejectBrokenMAILSyntax bool
// RequireTLS controls whether TLS is required for a connection before other
// commands can be issued, applied at the protocol layer.
RequireTLS bool
}
// NewProtocol returns a new SMTP state machine in INVALID state
// handler is called when a message is received and should return a message ID
func NewProtocol() *Protocol {
p := &Protocol{
Hostname: "mailhog.example",
Ident: "ESMTP MailHog",
State: INVALID,
MaximumLineLength: -1,
MaximumRecipients: -1,
}
p.resetState()
return p
}
func (proto *Protocol) resetState() {
proto.Message = &data.SMTPMessage{}
}
func (proto *Protocol) logf(message string, args ...interface{}) {
message = strings.Join([]string{"[PROTO: %s]", message}, " ")
args = append([]interface{}{StateMap[proto.State]}, args...)
if proto.LogHandler != nil {
proto.LogHandler(message, args...)
} else {
log.Printf(message, args...)
}
}
// Start begins an SMTP conversation with a 220 reply, placing the state
// machine in ESTABLISH state.
func (proto *Protocol) Start() *Reply {
proto.logf("Started session, switching to ESTABLISH state")
proto.State = ESTABLISH
return ReplyIdent(proto.Hostname + " " + proto.Ident)
}
// Parse parses a line string and returns any remaining line string
// and a reply, if a command was found. Parse does nothing until a
// new line is found.
// - TODO decide whether to move this to a buffer inside Protocol
// sort of like it this way, since it gives control back to the caller
func (proto *Protocol) Parse(line string) (string, *Reply) {
var reply *Reply
if !strings.Contains(line, "\r\n") {
return line, reply
}
parts := strings.SplitN(line, "\r\n", 2)
line = parts[1]
if proto.MaximumLineLength > -1 {
if len(parts[0]) > proto.MaximumLineLength {
return line, ReplyLineTooLong()
}
}
// TODO collapse AUTH states into separate processing
if proto.State == DATA {
reply = proto.ProcessData(parts[0])
} else {
reply = proto.ProcessCommand(parts[0])
}
return line, reply
}
// ProcessData handles content received (with newlines stripped) while
// in the SMTP DATA state
func (proto *Protocol) ProcessData(line string) (reply *Reply) {
proto.Message.Data += line + "\r\n"
if strings.HasSuffix(proto.Message.Data, "\r\n.\r\n") {
proto.Message.Data = strings.Replace(proto.Message.Data, "\r\n..", "\r\n.", -1)
proto.logf("Got EOF, storing message and switching to MAIL state")
proto.Message.Data = strings.TrimSuffix(proto.Message.Data, "\r\n.\r\n")
proto.State = MAIL
defer proto.resetState()
if proto.MessageReceivedHandler == nil {
return ReplyStorageFailed("No storage backend")
}
id, err := proto.MessageReceivedHandler(proto.Message)
if err != nil {
proto.logf("Error storing message: %s", err)
return ReplyStorageFailed("Unable to store message")
}
return ReplyOk("Ok: queued as " + id)
}
return
}
// ProcessCommand processes a line of text as a command
// It expects the line string to be a properly formed SMTP verb and arguments
func (proto *Protocol) ProcessCommand(line string) (reply *Reply) {
line = strings.Trim(line, "\r\n")
proto.logf("Processing line: %s", line)
words := strings.Split(line, " ")
command := strings.ToUpper(words[0])
args := strings.Join(words[1:len(words)], " ")
proto.logf("In state %d, got command '%s', args '%s'", proto.State, command, args)
cmd := ParseCommand(strings.TrimSuffix(line, "\r\n"))
return proto.Command(cmd)
}
// Command applies an SMTP verb and arguments to the state machine
func (proto *Protocol) Command(command *Command) (reply *Reply) {
defer func() {
proto.lastCommand = command
}()
if proto.SMTPVerbFilter != nil {
proto.logf("sending to SMTP verb filter")
r := proto.SMTPVerbFilter(command.verb)
if r != nil {
proto.logf("response returned by SMTP verb filter")
return r
}
}
switch {
case proto.TLSPending && !proto.TLSUpgraded:
proto.logf("Got command before TLS upgrade complete")
// FIXME what to do?
return ReplyBye()
case "RSET" == command.verb:
proto.logf("Got RSET command, switching to MAIL state")
proto.State = MAIL
proto.Message = &data.SMTPMessage{}
return ReplyOk()
case "NOOP" == command.verb:
proto.logf("Got NOOP verb, staying in %s state", StateMap[proto.State])
return ReplyOk()
case "QUIT" == command.verb:
proto.logf("Got QUIT verb, switching to DONE state")
proto.State = DONE
return ReplyBye()
case ESTABLISH == proto.State:
proto.logf("In ESTABLISH state")
switch command.verb {
case "HELO":
return proto.HELO(command.args)
case "EHLO":
return proto.EHLO(command.args)
case "STARTTLS":
return proto.STARTTLS(command.args)
default:
proto.logf("Got unknown command for ESTABLISH state: '%s'", command.verb)
return ReplyUnrecognisedCommand()
}
case "STARTTLS" == command.verb:
proto.logf("Got STARTTLS command outside ESTABLISH state")
return proto.STARTTLS(command.args)
case proto.RequireTLS && !proto.TLSUpgraded:
proto.logf("RequireTLS set and TLS not yet upgraded")
return ReplyMustIssueSTARTTLSFirst()
case AUTHPLAIN == proto.State:
proto.logf("Got PLAIN authentication response: '%s', switching to MAIL state", command.args)
proto.State = MAIL
if proto.ValidateAuthenticationHandler != nil {
// TODO error handling
val, _ := base64.StdEncoding.DecodeString(command.orig)
bits := strings.Split(string(val), string(rune(0)))
if len(bits) < 3 {
return ReplyError(errors.New("Badly formed parameter"))
}
user, pass := bits[1], bits[2]
if reply, ok := proto.ValidateAuthenticationHandler("PLAIN", user, pass); !ok {
return reply
}
}
return ReplyAuthOk()
case AUTHLOGIN == proto.State:
proto.logf("Got LOGIN authentication response: '%s', switching to AUTHLOGIN2 state", command.args)
proto.State = AUTHLOGIN2
return ReplyAuthResponse("UGFzc3dvcmQ6")
case AUTHLOGIN2 == proto.State:
proto.logf("Got LOGIN authentication response: '%s', switching to MAIL state", command.args)
proto.State = MAIL
if proto.ValidateAuthenticationHandler != nil {
if reply, ok := proto.ValidateAuthenticationHandler("LOGIN", proto.lastCommand.orig, command.orig); !ok {
return reply
}
}
return ReplyAuthOk()
case AUTHCRAMMD5 == proto.State:
proto.logf("Got CRAM-MD5 authentication response: '%s', switching to MAIL state", command.args)
proto.State = MAIL
if proto.ValidateAuthenticationHandler != nil {
if reply, ok := proto.ValidateAuthenticationHandler("CRAM-MD5", command.orig); !ok {
return reply
}
}
return ReplyAuthOk()
case MAIL == proto.State:
proto.logf("In MAIL state")
switch command.verb {
case "AUTH":
proto.logf("Got AUTH command, staying in MAIL state")
switch {
case strings.HasPrefix(command.args, "PLAIN "):
proto.logf("Got PLAIN authentication: %s", strings.TrimPrefix(command.args, "PLAIN "))
if proto.ValidateAuthenticationHandler != nil {
val, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(command.args, "PLAIN "))
bits := strings.Split(string(val), string(rune(0)))
if len(bits) < 3 {
return ReplyError(errors.New("Badly formed parameter"))
}
user, pass := bits[1], bits[2]
if reply, ok := proto.ValidateAuthenticationHandler("PLAIN", user, pass); !ok {
return reply
}
}
return ReplyAuthOk()
case "LOGIN" == command.args:
proto.logf("Got LOGIN authentication, switching to AUTH state")
proto.State = AUTHLOGIN
return ReplyAuthResponse("VXNlcm5hbWU6")
case "PLAIN" == command.args:
proto.logf("Got PLAIN authentication (no args), switching to AUTH2 state")
proto.State = AUTHPLAIN
return ReplyAuthResponse("")
case "CRAM-MD5" == command.args:
proto.logf("Got CRAM-MD5 authentication, switching to AUTH state")
proto.State = AUTHCRAMMD5
return ReplyAuthResponse("PDQxOTI5NDIzNDEuMTI4Mjg0NzJAc291cmNlZm91ci5hbmRyZXcuY211LmVkdT4=")
case strings.HasPrefix(command.args, "EXTERNAL "):
proto.logf("Got EXTERNAL authentication: %s", strings.TrimPrefix(command.args, "EXTERNAL "))
if proto.ValidateAuthenticationHandler != nil {
if reply, ok := proto.ValidateAuthenticationHandler("EXTERNAL", strings.TrimPrefix(command.args, "EXTERNAL ")); !ok {
return reply
}
}
return ReplyAuthOk()
default:
return ReplyUnsupportedAuth()
}
case "MAIL":
proto.logf("Got MAIL command, switching to RCPT state")
from, err := proto.ParseMAIL(command.args)
if err != nil {
return ReplyError(err)
}
if proto.ValidateSenderHandler != nil {
if !proto.ValidateSenderHandler(from) {
// TODO correct sender error response
return ReplyError(errors.New("Invalid sender " + from))
}
}
proto.Message.From = from
proto.State = RCPT
return ReplySenderOk(from)
case "HELO":
return proto.HELO(command.args)
case "EHLO":
return proto.EHLO(command.args)
default:
proto.logf("Got unknown command for MAIL state: '%s'", command)
return ReplyUnrecognisedCommand()
}
case RCPT == proto.State:
proto.logf("In RCPT state")
switch command.verb {
case "RCPT":
proto.logf("Got RCPT command")
if proto.MaximumRecipients > -1 && len(proto.Message.To) >= proto.MaximumRecipients {
return ReplyTooManyRecipients()
}
to, err := proto.ParseRCPT(command.args)
if err != nil {
return ReplyError(err)
}
if proto.ValidateRecipientHandler != nil {
if !proto.ValidateRecipientHandler(to) {
// TODO correct send error response
return ReplyError(errors.New("Invalid recipient " + to))
}
}
proto.Message.To = append(proto.Message.To, to)
proto.State = RCPT
return ReplyRecipientOk(to)
case "HELO":
return proto.HELO(command.args)
case "EHLO":
return proto.EHLO(command.args)
case "DATA":
proto.logf("Got DATA command, switching to DATA state")
proto.State = DATA
return ReplyDataResponse()
default:
proto.logf("Got unknown command for RCPT state: '%s'", command)
return ReplyUnrecognisedCommand()
}
default:
proto.logf("Command not recognised")
return ReplyUnrecognisedCommand()
}
}
// HELO creates a reply to a HELO command
func (proto *Protocol) HELO(args string) (reply *Reply) {
proto.logf("Got HELO command, switching to MAIL state")
proto.State = MAIL
proto.Message.Helo = args
return ReplyOk("Hello " + args)
}
// EHLO creates a reply to an EHLO command
func (proto *Protocol) EHLO(args string) (reply *Reply) {
proto.logf("Got EHLO command, switching to MAIL state")
proto.State = MAIL
proto.Message.Helo = args
replyArgs := []string{"Hello " + args, "PIPELINING"}
if proto.TLSHandler != nil && !proto.TLSPending && !proto.TLSUpgraded {
replyArgs = append(replyArgs, "STARTTLS")
}
if !proto.RequireTLS || proto.TLSUpgraded {
if proto.GetAuthenticationMechanismsHandler != nil {
mechanisms := proto.GetAuthenticationMechanismsHandler()
if len(mechanisms) > 0 {
replyArgs = append(replyArgs, "AUTH "+strings.Join(mechanisms, " "))
}
}
}
return ReplyOk(replyArgs...)
}
// STARTTLS creates a reply to a STARTTLS command
func (proto *Protocol) STARTTLS(args string) (reply *Reply) {
if proto.TLSUpgraded {
return ReplyUnrecognisedCommand()
}
if proto.TLSHandler == nil {
proto.logf("tls handler not found")
return ReplyUnrecognisedCommand()
}
if len(args) > 0 {
return ReplySyntaxError("no parameters allowed")
}
r, callback, ok := proto.TLSHandler(func(ok bool) {
proto.TLSUpgraded = ok
proto.TLSPending = ok
if ok {
proto.resetState()
proto.State = ESTABLISH
}
})
if !ok {
return r
}
proto.TLSPending = true
return ReplyReadyToStartTLS(callback)
}
var parseMailBrokenRegexp = regexp.MustCompile("(?i:From):\\s*<([^>]+)>")
var parseMailRFCRegexp = regexp.MustCompile("(?i:From):<([^>]+)>")
// ParseMAIL returns the forward-path from a MAIL command argument
func (proto *Protocol) ParseMAIL(mail string) (string, error) {
var match []string
if proto.RejectBrokenMAILSyntax {
match = parseMailRFCRegexp.FindStringSubmatch(mail)
} else {
match = parseMailBrokenRegexp.FindStringSubmatch(mail)
}
if len(match) != 2 {
return "", errors.New("Invalid syntax in MAIL command")
}
return match[1], nil
}
var parseRcptBrokenRegexp = regexp.MustCompile("(?i:To):\\s*<([^>]+)>")
var parseRcptRFCRegexp = regexp.MustCompile("(?i:To):<([^>]+)>")
// ParseRCPT returns the return-path from a RCPT command argument
func (proto *Protocol) ParseRCPT(rcpt string) (string, error) {
var match []string
if proto.RejectBrokenRCPTSyntax {
match = parseRcptRFCRegexp.FindStringSubmatch(rcpt)
} else {
match = parseRcptBrokenRegexp.FindStringSubmatch(rcpt)
}
if len(match) != 2 {
return "", errors.New("Invalid syntax in RCPT command")
}
return match[1], nil
}

View file

@@ -1,111 +0,0 @@
package smtp
import "strconv"
// http://www.rfc-editor.org/rfc/rfc5321.txt
// Reply is a struct representing an SMTP reply (status code + lines)
type Reply struct {
Status int
lines []string
Done func()
}
// Lines returns the formatted SMTP reply
func (r Reply) Lines() []string {
var lines []string
if len(r.lines) == 0 {
l := strconv.Itoa(r.Status)
lines = append(lines, l+"\n")
return lines
}
for i, line := range r.lines {
l := ""
if i == len(r.lines)-1 {
l = strconv.Itoa(r.Status) + " " + line + "\r\n"
} else {
l = strconv.Itoa(r.Status) + "-" + line + "\r\n"
}
lines = append(lines, l)
}
return lines
}
// ReplyIdent creates a 220 welcome reply
func ReplyIdent(ident string) *Reply { return &Reply{220, []string{ident}, nil} }
// ReplyReadyToStartTLS creates a 220 ready to start TLS reply
func ReplyReadyToStartTLS(callback func()) *Reply {
return &Reply{220, []string{"Ready to start TLS"}, callback}
}
// ReplyBye creates a 221 Bye reply
func ReplyBye() *Reply { return &Reply{221, []string{"Bye"}, nil} }
// ReplyAuthOk creates a 235 authentication successful reply
func ReplyAuthOk() *Reply { return &Reply{235, []string{"Authentication successful"}, nil} }
// ReplyOk creates a 250 Ok reply
func ReplyOk(message ...string) *Reply {
if len(message) == 0 {
message = []string{"Ok"}
}
return &Reply{250, message, nil}
}
// ReplySenderOk creates a 250 Sender ok reply
func ReplySenderOk(sender string) *Reply {
return &Reply{250, []string{"Sender " + sender + " ok"}, nil}
}
// ReplyRecipientOk creates a 250 Recipient ok reply
func ReplyRecipientOk(recipient string) *Reply {
return &Reply{250, []string{"Recipient " + recipient + " ok"}, nil}
}
// ReplyAuthResponse creates a 334 authentication reply
func ReplyAuthResponse(response string) *Reply { return &Reply{334, []string{response}, nil} }
// ReplyDataResponse creates a 354 data reply
func ReplyDataResponse() *Reply { return &Reply{354, []string{"End data with <CR><LF>.<CR><LF>"}, nil} }
// ReplyStorageFailed creates a 452 error reply
func ReplyStorageFailed(reason string) *Reply { return &Reply{452, []string{reason}, nil} }
// ReplyUnrecognisedCommand creates a 500 Unrecognised command reply
func ReplyUnrecognisedCommand() *Reply { return &Reply{500, []string{"Unrecognised command"}, nil} }
// ReplyLineTooLong creates a 500 Line too long reply
func ReplyLineTooLong() *Reply { return &Reply{500, []string{"Line too long"}, nil} }
// ReplySyntaxError creates a 501 Syntax error reply
func ReplySyntaxError(response string) *Reply {
if len(response) > 0 {
response = " (" + response + ")"
}
return &Reply{501, []string{"Syntax error" + response}, nil}
}
// ReplyUnsupportedAuth creates a 504 unsupported authentication reply
func ReplyUnsupportedAuth() *Reply {
return &Reply{504, []string{"Unsupported authentication mechanism"}, nil}
}
// ReplyMustIssueSTARTTLSFirst creates a 530 reply for RFC3207
func ReplyMustIssueSTARTTLSFirst() *Reply {
return &Reply{530, []string{"Must issue a STARTTLS command first"}, nil}
}
// ReplyInvalidAuth creates a 535 error reply
func ReplyInvalidAuth() *Reply {
return &Reply{535, []string{"Authentication credentials invalid"}, nil}
}
// ReplyError creates a 500 error reply
func ReplyError(err error) *Reply { return &Reply{550, []string{err.Error()}, nil} }
// ReplyTooManyRecipients creates a 552 too many recipients reply
func ReplyTooManyRecipients() *Reply { return &Reply{552, []string{"Too many recipients"}, nil} }

View file

@@ -1,32 +0,0 @@
package smtp
// State represents the state of an SMTP conversation
type State int
// SMTP message conversation states
const (
INVALID = State(-1)
ESTABLISH = State(iota)
AUTHPLAIN
AUTHLOGIN
AUTHLOGIN2
AUTHCRAMMD5
MAIL
RCPT
DATA
DONE
)
// StateMap provides string representations of SMTP conversation states
var StateMap = map[State]string{
INVALID: "INVALID",
ESTABLISH: "ESTABLISH",
AUTHPLAIN: "AUTHPLAIN",
AUTHLOGIN: "AUTHLOGIN",
AUTHLOGIN2: "AUTHLOGIN2",
AUTHCRAMMD5: "AUTHCRAMMD5",
MAIL: "MAIL",
RCPT: "RCPT",
DATA: "DATA",
DONE: "DONE",
}

View file

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 - 2016 Ian Kent
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,16 +0,0 @@
MailHog storage backends [![GoDoc](https://godoc.org/github.com/mailhog/storage?status.svg)](https://godoc.org/github.com/mailhog/storage) [![Build Status](https://travis-ci.org/mailhog/storage.svg?branch=master)](https://travis-ci.org/mailhog/storage)
=========
`github.com/mailhog/storage` implements MailHog storage backends:
* In-memory
* Maildir
* MongoDB
You should implement the `storage.Storage` interface to provide your
own storage backend; the built-in backends can be driven through the same interface, as in the sketch below.
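A minimal sketch of driving a backend through the interface, using the in-memory implementation (the message values are made up):

```go
package main

import (
	"fmt"

	"github.com/mailhog/data"
	"github.com/mailhog/storage"
)

func main() {
	// CreateInMemory returns a backend that satisfies storage.Storage.
	var store storage.Storage = storage.CreateInMemory()

	raw := &data.SMTPMessage{
		From: "a@example.com",
		To:   []string{"b@example.com"},
		Data: "Subject: Hi\r\n\r\nHi\r\n",
	}
	if _, err := store.Store(raw.Parse("mailhog.example")); err != nil {
		panic(err)
	}

	fmt.Println("stored messages:", store.Count())
}
```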
### Licence
Copyright © 2014 - 2016, Ian Kent (http://iankent.uk)
Released under MIT license, see [LICENSE](LICENSE.md) for details.

View file

@@ -1,184 +0,0 @@
package storage
import (
"errors"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/mailhog/data"
)
// Maildir is a maildir storage backend
type Maildir struct {
Path string
}
// CreateMaildir creates a new maildir storage backend
func CreateMaildir(path string) *Maildir {
if len(path) == 0 {
dir, err := ioutil.TempDir("", "mailhog")
if err != nil {
panic(err)
}
path = dir
}
if _, err := os.Stat(path); err != nil {
err := os.MkdirAll(path, 0770)
if err != nil {
panic(err)
}
}
log.Println("Maildir path is", path)
return &Maildir{
Path: path,
}
}
// Store stores a message and returns its storage ID
func (maildir *Maildir) Store(m *data.Message) (string, error) {
b, err := ioutil.ReadAll(m.Raw.Bytes())
if err != nil {
return "", err
}
err = ioutil.WriteFile(filepath.Join(maildir.Path, string(m.ID)), b, 0660)
return string(m.ID), err
}
// Count returns the number of stored messages
func (maildir *Maildir) Count() int {
// FIXME may be wrong, ../. ?
// and handle error?
dir, err := os.Open(maildir.Path)
if err != nil {
panic(err)
}
defer dir.Close()
n, _ := dir.Readdirnames(0)
return len(n)
}
// Search finds messages matching the query
func (maildir *Maildir) Search(kind, query string, start, limit int) (*data.Messages, int, error) {
query = strings.ToLower(query)
var filteredMessages = make([]data.Message, 0)
var matched int
err := filepath.Walk(maildir.Path, func(path string, info os.FileInfo, err error) error {
if limit > 0 && len(filteredMessages) >= limit {
return errors.New("reached limit")
}
if info.IsDir() {
return nil
}
msg, err := maildir.Load(info.Name())
if err != nil {
log.Println(err)
return nil
}
switch kind {
case "to":
for _, t := range msg.To {
if strings.Contains(strings.ToLower(t.Mailbox+"@"+t.Domain), query) {
if start > matched {
matched++
break
}
filteredMessages = append(filteredMessages, *msg)
break
}
}
case "from":
if strings.Contains(strings.ToLower(msg.From.Mailbox+"@"+msg.From.Domain), query) {
if start > matched {
matched++
break
}
filteredMessages = append(filteredMessages, *msg)
}
case "containing":
if strings.Contains(strings.ToLower(msg.Raw.Data), query) {
if start > matched {
matched++
break
}
filteredMessages = append(filteredMessages, *msg)
}
}
return nil
})
if err != nil {
log.Println(err)
}
msgs := data.Messages(filteredMessages)
return &msgs, len(filteredMessages), nil
}
// List lists stored messages by index
func (maildir *Maildir) List(start, limit int) (*data.Messages, error) {
log.Println("Listing messages in", maildir.Path)
messages := make([]data.Message, 0)
dir, err := os.Open(maildir.Path)
if err != nil {
return nil, err
}
defer dir.Close()
n, err := dir.Readdir(0)
if err != nil {
return nil, err
}
for _, fileinfo := range n {
b, err := ioutil.ReadFile(filepath.Join(maildir.Path, fileinfo.Name()))
if err != nil {
return nil, err
}
msg := data.FromBytes(b)
// FIXME domain
m := *msg.Parse("mailhog.example")
m.ID = data.MessageID(fileinfo.Name())
m.Created = fileinfo.ModTime()
messages = append(messages, m)
}
log.Printf("Found %d messages", len(messages))
msgs := data.Messages(messages)
return &msgs, nil
}
// DeleteOne deletes an individual message by storage ID
func (maildir *Maildir) DeleteOne(id string) error {
return os.Remove(filepath.Join(maildir.Path, id))
}
// DeleteAll deletes all in memory messages
func (maildir *Maildir) DeleteAll() error {
err := os.RemoveAll(maildir.Path)
if err != nil {
return err
}
return os.Mkdir(maildir.Path, 0770)
}
// Load returns an individual message by storage ID
func (maildir *Maildir) Load(id string) (*data.Message, error) {
b, err := ioutil.ReadFile(filepath.Join(maildir.Path, id))
if err != nil {
return nil, err
}
// FIXME domain
m := data.FromBytes(b).Parse("mailhog.example")
m.ID = data.MessageID(id)
return m, nil
}

View file

@@ -1,199 +0,0 @@
package storage
import (
"errors"
"strings"
"sync"
"github.com/mailhog/data"
)
// InMemory is an in memory storage backend
type InMemory struct {
MessageIDIndex map[string]int
Messages []*data.Message
mu sync.Mutex
}
// CreateInMemory creates a new in memory storage backend
func CreateInMemory() *InMemory {
return &InMemory{
MessageIDIndex: make(map[string]int),
Messages: make([]*data.Message, 0),
}
}
// Store stores a message and returns its storage ID
func (memory *InMemory) Store(m *data.Message) (string, error) {
memory.mu.Lock()
defer memory.mu.Unlock()
memory.Messages = append(memory.Messages, m)
memory.MessageIDIndex[string(m.ID)] = len(memory.Messages) - 1
return string(m.ID), nil
}
// Count returns the number of stored messages
func (memory *InMemory) Count() int {
return len(memory.Messages)
}
// Search finds messages matching the query
func (memory *InMemory) Search(kind, query string, start, limit int) (*data.Messages, int, error) {
// FIXME needs optimising, or replacing with a proper db!
query = strings.ToLower(query)
var filteredMessages = make([]*data.Message, 0)
for _, m := range memory.Messages {
doAppend := false
switch kind {
case "to":
for _, to := range m.To {
if strings.Contains(strings.ToLower(to.Mailbox+"@"+to.Domain), query) {
doAppend = true
break
}
}
if !doAppend {
if hdr, ok := m.Content.Headers["To"]; ok {
for _, to := range hdr {
if strings.Contains(strings.ToLower(to), query) {
doAppend = true
break
}
}
}
}
case "from":
if strings.Contains(strings.ToLower(m.From.Mailbox+"@"+m.From.Domain), query) {
doAppend = true
}
if !doAppend {
if hdr, ok := m.Content.Headers["From"]; ok {
for _, from := range hdr {
if strings.Contains(strings.ToLower(from), query) {
doAppend = true
break
}
}
}
}
case "containing":
if strings.Contains(strings.ToLower(m.Content.Body), query) {
doAppend = true
}
if !doAppend {
for _, hdr := range m.Content.Headers {
for _, v := range hdr {
if strings.Contains(strings.ToLower(v), query) {
doAppend = true
}
}
}
}
}
if doAppend {
filteredMessages = append(filteredMessages, m)
}
}
var messages = make([]data.Message, 0)
if len(filteredMessages) == 0 || start > len(filteredMessages) {
msgs := data.Messages(messages)
return &msgs, 0, nil
}
if start+limit > len(filteredMessages) {
limit = len(filteredMessages) - start
}
start = len(filteredMessages) - start - 1
end := start - limit
if start < 0 {
start = 0
}
if end < -1 {
end = -1
}
for i := start; i > end; i-- {
//for _, m := range memory.MessageIndex[start:end] {
messages = append(messages, *filteredMessages[i])
}
msgs := data.Messages(messages)
return &msgs, len(filteredMessages), nil
}
// List lists stored messages by index
func (memory *InMemory) List(start int, limit int) (*data.Messages, error) {
var messages = make([]data.Message, 0)
if len(memory.Messages) == 0 || start > len(memory.Messages) {
msgs := data.Messages(messages)
return &msgs, nil
}
if start+limit > len(memory.Messages) {
limit = len(memory.Messages) - start
}
start = len(memory.Messages) - start - 1
end := start - limit
if start < 0 {
start = 0
}
if end < -1 {
end = -1
}
for i := start; i > end; i-- {
//for _, m := range memory.MessageIndex[start:end] {
messages = append(messages, *memory.Messages[i])
}
msgs := data.Messages(messages)
return &msgs, nil
}
// DeleteOne deletes an individual message by storage ID
func (memory *InMemory) DeleteOne(id string) error {
memory.mu.Lock()
defer memory.mu.Unlock()
var index int
var ok bool
if index, ok = memory.MessageIDIndex[id]; !ok {
return errors.New("message not found")
}
delete(memory.MessageIDIndex, id)
for k, v := range memory.MessageIDIndex {
if v > index {
memory.MessageIDIndex[k] = v - 1
}
}
memory.Messages = append(memory.Messages[:index], memory.Messages[index+1:]...)
return nil
}
// DeleteAll deletes all in memory messages
func (memory *InMemory) DeleteAll() error {
memory.mu.Lock()
defer memory.mu.Unlock()
memory.Messages = make([]*data.Message, 0)
memory.MessageIDIndex = make(map[string]int)
return nil
}
// Load returns an individual message by storage ID
func (memory *InMemory) Load(id string) (*data.Message, error) {
if idx, ok := memory.MessageIDIndex[id]; ok {
return memory.Messages[idx], nil
}
return nil, nil
}

View file

@@ -1,122 +0,0 @@
package storage
import (
"github.com/mailhog/data"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"log"
)
// MongoDB represents MongoDB backed storage backend
type MongoDB struct {
Session *mgo.Session
Collection *mgo.Collection
}
// CreateMongoDB creates a MongoDB backed storage backend
func CreateMongoDB(uri, db, coll string) *MongoDB {
log.Printf("Connecting to MongoDB: %s\n", uri)
session, err := mgo.Dial(uri)
if err != nil {
log.Printf("Error connecting to MongoDB: %s", err)
return nil
}
err = session.DB(db).C(coll).EnsureIndexKey("created")
if err != nil {
log.Printf("Failed creating index: %s", err)
return nil
}
return &MongoDB{
Session: session,
Collection: session.DB(db).C(coll),
}
}
// Store stores a message in MongoDB and returns its storage ID
func (mongo *MongoDB) Store(m *data.Message) (string, error) {
err := mongo.Collection.Insert(m)
if err != nil {
log.Printf("Error inserting message: %s", err)
return "", err
}
return string(m.ID), nil
}
// Count returns the number of stored messages
func (mongo *MongoDB) Count() int {
c, _ := mongo.Collection.Count()
return c
}
// Search finds messages matching the query
func (mongo *MongoDB) Search(kind, query string, start, limit int) (*data.Messages, int, error) {
messages := &data.Messages{}
var count = 0
var field = "raw.data"
switch kind {
case "to":
field = "raw.to"
case "from":
field = "raw.from"
}
err := mongo.Collection.Find(bson.M{field: bson.RegEx{Pattern: query, Options: "i"}}).Skip(start).Limit(limit).Sort("-created").Select(bson.M{
"id": 1,
"_id": 1,
"from": 1,
"to": 1,
"content.headers": 1,
"content.size": 1,
"created": 1,
"raw": 1,
}).All(messages)
if err != nil {
log.Printf("Error loading messages: %s", err)
return nil, 0, err
}
count, _ = mongo.Collection.Find(bson.M{field: bson.RegEx{Pattern: query, Options: "i"}}).Count()
return messages, count, nil
}
// List returns a list of messages by index
func (mongo *MongoDB) List(start int, limit int) (*data.Messages, error) {
messages := &data.Messages{}
err := mongo.Collection.Find(bson.M{}).Skip(start).Limit(limit).Sort("-created").Select(bson.M{
"id": 1,
"_id": 1,
"from": 1,
"to": 1,
"content.headers": 1,
"content.size": 1,
"created": 1,
"raw": 1,
}).All(messages)
if err != nil {
log.Printf("Error loading messages: %s", err)
return nil, err
}
return messages, nil
}
// DeleteOne deletes an individual message by storage ID
func (mongo *MongoDB) DeleteOne(id string) error {
_, err := mongo.Collection.RemoveAll(bson.M{"id": id})
return err
}
// DeleteAll deletes all messages stored in MongoDB
func (mongo *MongoDB) DeleteAll() error {
_, err := mongo.Collection.RemoveAll(bson.M{})
return err
}
// Load loads an individual message by storage ID
func (mongo *MongoDB) Load(id string) (*data.Message, error) {
result := &data.Message{}
err := mongo.Collection.Find(bson.M{"id": id}).One(&result)
if err != nil {
log.Printf("Error loading message: %s", err)
return nil, err
}
return result, nil
}

View file

@@ -1,14 +0,0 @@
package storage
import "github.com/mailhog/data"
// Storage represents a storage backend
type Storage interface {
Store(m *data.Message) (string, error)
List(start, limit int) (*data.Messages, error)
Search(kind, query string, start, limit int) (*data.Messages, int, error)
Count() int
DeleteOne(id string) error
DeleteAll() error
Load(id string) (*data.Message, error)
}

View file

@@ -1,7 +0,0 @@
Copyright (c) 2014-2015, Philip Hofer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@@ -1,315 +0,0 @@
# fwd
import "github.com/philhofer/fwd"
The `fwd` package provides a buffered reader
and writer. Each has methods that help improve
the encoding/decoding performance of some binary
protocols.
The `fwd.Writer` and `fwd.Reader` types provide similar
functionality to their counterparts in `bufio`, plus
a few extra utility methods that simplify read-ahead
and write-ahead. I wrote this package to improve serialization
performance for [github.com/tinylib/msgp](http://github.com/tinylib/msgp),
where it provided about a 2x speedup over `bufio` for certain
workloads. However, care must be taken to understand the semantics of the
extra methods provided by this package, as they allow
the user to access and manipulate the buffer memory
directly.
The extra methods for `fwd.Reader` are `Peek`, `Skip`
and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
will re-allocate the read buffer in order to accommodate arbitrarily
large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
in the stream, and uses the `io.Seeker` interface if the underlying
stream implements it. `(*fwd.Reader).Next` returns a slice pointing
to the next `n` bytes in the read buffer (like `Peek`), but also
increments the read position. This allows users to process streams
in arbitrary block sizes without having to manage appropriately-sized
slices. Additionally, obviating the need to copy the data from the
buffer to another location in memory can improve performance dramatically
in CPU-bound applications.
`fwd.Writer` has only one extra method, `(*fwd.Writer).Next`, which
returns a slice pointing to the next `n` bytes of the writer, and increments
the write position by the length of the returned slice. This allows users
to write directly to the end of the buffer.
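As a quick, hedged illustration of the read-ahead and write-ahead methods described above (not part of the package itself), the following sketch reserves bytes with `Writer.Next`, flushes them, and then consumes the stream in a fixed-size block with `Reader.Next`:
``` go
package main

import (
	"bytes"
	"fmt"

	"github.com/philhofer/fwd"
)

func main() {
	// Write-ahead: reserve 4 bytes in the write buffer and fill them in place.
	var stream bytes.Buffer
	w := fwd.NewWriter(&stream)
	buf, err := w.Next(4)
	if err != nil {
		panic(err)
	}
	copy(buf, "ping")
	if err := w.Flush(); err != nil {
		panic(err)
	}

	// Read-ahead: take the next 4 bytes directly from the read buffer.
	// The returned slice is only valid until the next reader method call.
	r := fwd.NewReader(&stream)
	blk, err := r.Next(4)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", blk) // ping
}
```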
## Constants
``` go
const (
// DefaultReaderSize is the default size of the read buffer
DefaultReaderSize = 2048
)
```
``` go
const (
// DefaultWriterSize is the
// default write buffer size.
DefaultWriterSize = 2048
)
```
## type Reader
``` go
type Reader struct {
// contains filtered or unexported fields
}
```
Reader is a buffered look-ahead reader
### func NewReader
``` go
func NewReader(r io.Reader) *Reader
```
NewReader returns a new *Reader that reads from 'r'
### func NewReaderSize
``` go
func NewReaderSize(r io.Reader, n int) *Reader
```
NewReaderSize returns a new *Reader that
reads from 'r' and has a buffer size 'n'
### func (\*Reader) BufferSize
``` go
func (r *Reader) BufferSize() int
```
BufferSize returns the total size of the buffer
### func (\*Reader) Buffered
``` go
func (r *Reader) Buffered() int
```
Buffered returns the number of bytes currently in the buffer
### func (\*Reader) Next
``` go
func (r *Reader) Next(n int) ([]byte, error)
```
Next returns the next 'n' bytes in the stream.
Unlike Peek, Next advances the reader position.
The returned bytes point to the same
data as the buffer, so the slice is
only valid until the next reader method call.
An EOF is considered an unexpected error.
If the returned slice is shorter than the
length asked for, an error will be returned,
and the reader position will not be incremented.
### func (\*Reader) Peek
``` go
func (r *Reader) Peek(n int) ([]byte, error)
```
Peek returns the next 'n' buffered bytes,
reading from the underlying reader if necessary.
It will only return a slice shorter than 'n' bytes
if it also returns an error. Peek does not advance
the reader. EOF errors are *not* returned as
io.ErrUnexpectedEOF.
### func (\*Reader) Read
``` go
func (r *Reader) Read(b []byte) (int, error)
```
Read implements `io.Reader`
### func (\*Reader) ReadByte
``` go
func (r *Reader) ReadByte() (byte, error)
```
ReadByte implements `io.ByteReader`
### func (\*Reader) ReadFull
``` go
func (r *Reader) ReadFull(b []byte) (int, error)
```
ReadFull attempts to read len(b) bytes into
'b'. It returns the number of bytes read into
'b', and an error if it does not return len(b).
EOF is considered an unexpected error.
### func (\*Reader) Reset
``` go
func (r *Reader) Reset(rd io.Reader)
```
Reset resets the underlying reader
and the read buffer.
### func (\*Reader) Skip
``` go
func (r *Reader) Skip(n int) (int, error)
```
Skip moves the reader forward 'n' bytes.
Returns the number of bytes skipped and any
errors encountered. It is analogous to Seek(n, 1).
If the underlying reader implements io.Seeker, then
that method will be used to skip forward.
If the reader encounters
an EOF before skipping 'n' bytes, it
returns io.ErrUnexpectedEOF. If the
underlying reader implements io.Seeker, then
those rules apply instead. (Many implementations
will not return `io.EOF` until the next call
to Read.)
### func (\*Reader) WriteTo
``` go
func (r *Reader) WriteTo(w io.Writer) (int64, error)
```
WriteTo implements `io.WriterTo`
## type Writer
``` go
type Writer struct {
// contains filtered or unexported fields
}
```
Writer is a buffered writer
### func NewWriter
``` go
func NewWriter(w io.Writer) *Writer
```
NewWriter returns a new writer
that writes to 'w' and has a buffer
that is `DefaultWriterSize` bytes.
### func NewWriterSize
``` go
func NewWriterSize(w io.Writer, size int) *Writer
```
NewWriterSize returns a new writer
that writes to 'w' and has a buffer
that is 'size' bytes.
### func (\*Writer) BufferSize
``` go
func (w *Writer) BufferSize() int
```
BufferSize returns the maximum size of the buffer.
### func (\*Writer) Buffered
``` go
func (w *Writer) Buffered() int
```
Buffered returns the number of buffered bytes
in the writer.
### func (\*Writer) Flush
``` go
func (w *Writer) Flush() error
```
Flush flushes any buffered bytes
to the underlying writer.
### func (\*Writer) Next
``` go
func (w *Writer) Next(n int) ([]byte, error)
```
Next returns the next 'n' free bytes
in the write buffer, flushing the writer
as necessary. Next will return `io.ErrShortBuffer`
if 'n' is greater than the size of the write buffer.
Calls to 'next' increment the write position by
the size of the returned buffer.
### func (\*Writer) ReadFrom
``` go
func (w *Writer) ReadFrom(r io.Reader) (int64, error)
```
ReadFrom implements `io.ReaderFrom`
### func (\*Writer) Write
``` go
func (w *Writer) Write(p []byte) (int, error)
```
Write implements `io.Writer`
### func (\*Writer) WriteByte
``` go
func (w *Writer) WriteByte(b byte) error
```
WriteByte implements `io.ByteWriter`
### func (\*Writer) WriteString
``` go
func (w *Writer) WriteString(s string) (int, error)
```
WriteString is analogous to Write, but it takes a string.
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

View file

@ -1,379 +0,0 @@
// The `fwd` package provides a buffered reader
// and writer. Each has methods that help improve
// the encoding/decoding performance of some binary
// protocols.
//
// The `fwd.Writer` and `fwd.Reader` types provide similar
// functionality to their counterparts in `bufio`, plus
// a few extra utility methods that simplify read-ahead
// and write-ahead. I wrote this package to improve serialization
// performance for http://github.com/tinylib/msgp,
// where it provided about a 2x speedup over `bufio` for certain
// workloads. However, care must be taken to understand the semantics of the
// extra methods provided by this package, as they allow
// the user to access and manipulate the buffer memory
// directly.
//
// The extra methods for `fwd.Reader` are `Peek`, `Skip`
// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
// will re-allocate the read buffer in order to accommodate arbitrarily
// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
// in the stream, and uses the `io.Seeker` interface if the underlying
// stream implements it. `(*fwd.Reader).Next` returns a slice pointing
// to the next `n` bytes in the read buffer (like `Peek`), but also
// increments the read position. This allows users to process streams
// in arbitrary block sizes without having to manage appropriately-sized
// slices. Additionally, obviating the need to copy the data from the
// buffer to another location in memory can improve performance dramatically
// in CPU-bound applications.
//
// `fwd.Writer` has only one extra method, `(*fwd.Writer).Next`, which
// returns a slice pointing to the next `n` bytes of the writer, and increments
// the write position by the length of the returned slice. This allows users
// to write directly to the end of the buffer.
//
package fwd
import "io"
const (
// DefaultReaderSize is the default size of the read buffer
DefaultReaderSize = 2048
// minimum read buffer; straight from bufio
minReaderSize = 16
)
// NewReader returns a new *Reader that reads from 'r'
func NewReader(r io.Reader) *Reader {
return NewReaderSize(r, DefaultReaderSize)
}
// NewReaderSize returns a new *Reader that
// reads from 'r' and has a buffer size 'n'
func NewReaderSize(r io.Reader, n int) *Reader {
rd := &Reader{
r: r,
data: make([]byte, 0, max(minReaderSize, n)),
}
if s, ok := r.(io.Seeker); ok {
rd.rs = s
}
return rd
}
// Reader is a buffered look-ahead reader
type Reader struct {
r io.Reader // underlying reader
// data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space
data []byte // data
n int // read offset
state error // last read error
// if the reader passed to NewReader was
// also an io.Seeker, this is non-nil
rs io.Seeker
}
// Reset resets the underlying reader
// and the read buffer.
func (r *Reader) Reset(rd io.Reader) {
r.r = rd
r.data = r.data[0:0]
r.n = 0
r.state = nil
if s, ok := rd.(io.Seeker); ok {
r.rs = s
} else {
r.rs = nil
}
}
// more() does one read on the underlying reader
func (r *Reader) more() {
// move data backwards so that
// the read offset is 0; this way
// we can supply the maximum number of
// bytes to the reader
if r.n != 0 {
if r.n < len(r.data) {
r.data = r.data[:copy(r.data[0:], r.data[r.n:])]
} else {
r.data = r.data[:0]
}
r.n = 0
}
var a int
a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)])
if a == 0 && r.state == nil {
r.state = io.ErrNoProgress
return
}
r.data = r.data[:len(r.data)+a]
}
// pop error
func (r *Reader) err() (e error) {
e, r.state = r.state, nil
return
}
// pop error; EOF -> io.ErrUnexpectedEOF
func (r *Reader) noEOF() (e error) {
e, r.state = r.state, nil
if e == io.EOF {
e = io.ErrUnexpectedEOF
}
return
}
// buffered bytes
func (r *Reader) buffered() int { return len(r.data) - r.n }
// Buffered returns the number of bytes currently in the buffer
func (r *Reader) Buffered() int { return len(r.data) - r.n }
// BufferSize returns the total size of the buffer
func (r *Reader) BufferSize() int { return cap(r.data) }
// Peek returns the next 'n' buffered bytes,
// reading from the underlying reader if necessary.
// It will only return a slice shorter than 'n' bytes
// if it also returns an error. Peek does not advance
// the reader. EOF errors are *not* returned as
// io.ErrUnexpectedEOF.
func (r *Reader) Peek(n int) ([]byte, error) {
// in the degenerate case,
// we may need to realloc
// (the caller asked for more
// bytes than the size of the buffer)
if cap(r.data) < n {
old := r.data[r.n:]
r.data = make([]byte, n+r.buffered())
r.data = r.data[:copy(r.data, old)]
r.n = 0
}
// keep filling until
// we hit an error or
// read enough bytes
for r.buffered() < n && r.state == nil {
r.more()
}
// we must have hit an error
if r.buffered() < n {
return r.data[r.n:], r.err()
}
return r.data[r.n : r.n+n], nil
}
// Skip moves the reader forward 'n' bytes.
// Returns the number of bytes skipped and any
// errors encountered. It is analogous to Seek(n, 1).
// If the underlying reader implements io.Seeker, then
// that method will be used to skip forward.
//
// If the reader encounters
// an EOF before skipping 'n' bytes, it
// returns io.ErrUnexpectedEOF. If the
// underlying reader implements io.Seeker, then
// those rules apply instead. (Many implementations
// will not return `io.EOF` until the next call
// to Read.)
func (r *Reader) Skip(n int) (int, error) {
// fast path
if r.buffered() >= n {
r.n += n
return n, nil
}
// use seeker implementation
// if we can
if r.rs != nil {
return r.skipSeek(n)
}
// loop on filling
// and then erasing
o := n
for r.buffered() < n && r.state == nil {
r.more()
// we can skip forward
// up to r.buffered() bytes
step := min(r.buffered(), n)
r.n += step
n -= step
}
// at this point, n should be
// 0 if everything went smoothly
return o - n, r.noEOF()
}
// Next returns the next 'n' bytes in the stream.
// Unlike Peek, Next advances the reader position.
// The returned bytes point to the same
// data as the buffer, so the slice is
// only valid until the next reader method call.
// An EOF is considered an unexpected error.
// If the returned slice is shorter than the
// length asked for, an error will be returned,
// and the reader position will not be incremented.
func (r *Reader) Next(n int) ([]byte, error) {
// in case the buffer is too small
if cap(r.data) < n {
old := r.data[r.n:]
r.data = make([]byte, n+r.buffered())
r.data = r.data[:copy(r.data, old)]
r.n = 0
}
// fill at least 'n' bytes
for r.buffered() < n && r.state == nil {
r.more()
}
if r.buffered() < n {
return r.data[r.n:], r.noEOF()
}
out := r.data[r.n : r.n+n]
r.n += n
return out, nil
}
// skipSeek uses the io.Seeker to seek forward.
// only call this function when n > r.buffered()
func (r *Reader) skipSeek(n int) (int, error) {
o := r.buffered()
// first, clear buffer
n -= o
r.n = 0
r.data = r.data[:0]
// then seek forward the remaining bytes
i, err := r.rs.Seek(int64(n), 1)
return int(i) + o, err
}
// Read implements `io.Reader`
func (r *Reader) Read(b []byte) (int, error) {
// if we have data in the buffer, just
// return that.
if r.buffered() != 0 {
x := copy(b, r.data[r.n:])
r.n += x
return x, nil
}
var n int
// we have no buffered data; determine
// whether or not to buffer or call
// the underlying reader directly
if len(b) >= cap(r.data) {
n, r.state = r.r.Read(b)
} else {
r.more()
n = copy(b, r.data)
r.n = n
}
if n == 0 {
return 0, r.err()
}
return n, nil
}
// ReadFull attempts to read len(b) bytes into
// 'b'. It returns the number of bytes read into
// 'b', and an error if it does not return len(b).
// EOF is considered an unexpected error.
func (r *Reader) ReadFull(b []byte) (int, error) {
var n int // read into b
var nn int // scratch
l := len(b)
// either read buffered data,
// or read directly from the underlying
// reader, or fetch more buffered data.
for n < l && r.state == nil {
if r.buffered() != 0 {
nn = copy(b[n:], r.data[r.n:])
n += nn
r.n += nn
} else if l-n > cap(r.data) {
nn, r.state = r.r.Read(b[n:])
n += nn
} else {
r.more()
}
}
if n < l {
return n, r.noEOF()
}
return n, nil
}
// ReadByte implements `io.ByteReader`
func (r *Reader) ReadByte() (byte, error) {
for r.buffered() < 1 && r.state == nil {
r.more()
}
if r.buffered() < 1 {
return 0, r.err()
}
b := r.data[r.n]
r.n++
return b, nil
}
// WriteTo implements `io.WriterTo`
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
var (
i int64
ii int
err error
)
// first, clear buffer
if r.buffered() > 0 {
ii, err = w.Write(r.data[r.n:])
i += int64(ii)
if err != nil {
return i, err
}
r.data = r.data[0:0]
r.n = 0
}
for r.state == nil {
// here we just do
// 1:1 reads and writes
r.more()
if r.buffered() > 0 {
ii, err = w.Write(r.data)
i += int64(ii)
if err != nil {
return i, err
}
r.data = r.data[0:0]
r.n = 0
}
}
if r.state != io.EOF {
return i, r.err()
}
return i, nil
}
func min(a int, b int) int {
if a < b {
return a
}
return b
}
func max(a int, b int) int {
if a < b {
return b
}
return a
}

View file

@ -1,224 +0,0 @@
package fwd
import "io"
const (
// DefaultWriterSize is the
// default write buffer size.
DefaultWriterSize = 2048
minWriterSize = minReaderSize
)
// Writer is a buffered writer
type Writer struct {
w io.Writer // writer
buf []byte // 0:len(buf) is buffered data
}
// NewWriter returns a new writer
// that writes to 'w' and has a buffer
// that is `DefaultWriterSize` bytes.
func NewWriter(w io.Writer) *Writer {
if wr, ok := w.(*Writer); ok {
return wr
}
return &Writer{
w: w,
buf: make([]byte, 0, DefaultWriterSize),
}
}
// NewWriterSize returns a new writer
// that writes to 'w' and has a buffer
// that is 'size' bytes.
func NewWriterSize(w io.Writer, size int) *Writer {
if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size {
return wr
}
return &Writer{
w: w,
buf: make([]byte, 0, max(size, minWriterSize)),
}
}
// Buffered returns the number of buffered bytes
// in the writer.
func (w *Writer) Buffered() int { return len(w.buf) }
// BufferSize returns the maximum size of the buffer.
func (w *Writer) BufferSize() int { return cap(w.buf) }
// Flush flushes any buffered bytes
// to the underlying writer.
func (w *Writer) Flush() error {
l := len(w.buf)
if l > 0 {
n, err := w.w.Write(w.buf)
// if we didn't write the whole
// thing, copy the unwritten
// bytes to the beginning of the
// buffer.
if n < l && n > 0 {
w.pushback(n)
if err == nil {
err = io.ErrShortWrite
}
}
if err != nil {
return err
}
w.buf = w.buf[:0]
return nil
}
return nil
}
// Write implements `io.Writer`
func (w *Writer) Write(p []byte) (int, error) {
c, l, ln := cap(w.buf), len(w.buf), len(p)
avail := c - l
// requires flush
if avail < ln {
if err := w.Flush(); err != nil {
return 0, err
}
l = len(w.buf)
}
// too big to fit in buffer;
// write directly to w.w
if c < ln {
return w.w.Write(p)
}
// grow buf slice; copy; return
w.buf = w.buf[:l+ln]
return copy(w.buf[l:], p), nil
}
// WriteString is analogous to Write, but it takes a string.
func (w *Writer) WriteString(s string) (int, error) {
c, l, ln := cap(w.buf), len(w.buf), len(s)
avail := c - l
// requires flush
if avail < ln {
if err := w.Flush(); err != nil {
return 0, err
}
l = len(w.buf)
}
// too big to fit in buffer;
// write directly to w.w
//
// yes, this is unsafe. *but*
// io.Writer is not allowed
// to mutate its input or
// maintain a reference to it,
// per the spec in package io.
//
// plus, if the string is really
// too big to fit in the buffer, then
// creating a copy to write it is
// expensive (and, strictly speaking,
// unnecessary)
if c < ln {
return w.w.Write(unsafestr(s))
}
// grow buf slice; copy; return
w.buf = w.buf[:l+ln]
return copy(w.buf[l:], s), nil
}
// WriteByte implements `io.ByteWriter`
func (w *Writer) WriteByte(b byte) error {
if len(w.buf) == cap(w.buf) {
if err := w.Flush(); err != nil {
return err
}
}
w.buf = append(w.buf, b)
return nil
}
// Next returns the next 'n' free bytes
// in the write buffer, flushing the writer
// as necessary. Next will return `io.ErrShortBuffer`
// if 'n' is greater than the size of the write buffer.
// Calls to 'next' increment the write position by
// the size of the returned buffer.
func (w *Writer) Next(n int) ([]byte, error) {
c, l := cap(w.buf), len(w.buf)
if n > c {
return nil, io.ErrShortBuffer
}
avail := c - l
if avail < n {
if err := w.Flush(); err != nil {
return nil, err
}
l = len(w.buf)
}
w.buf = w.buf[:l+n]
return w.buf[l:], nil
}
// take the bytes from w.buf[n:len(w.buf)]
// and put them at the beginning of w.buf,
// and resize to the length of the copied segment.
func (w *Writer) pushback(n int) {
w.buf = w.buf[:copy(w.buf, w.buf[n:])]
}
// ReadFrom implements `io.ReaderFrom`
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
// anticipatory flush
if err := w.Flush(); err != nil {
return 0, err
}
w.buf = w.buf[0:cap(w.buf)] // expand buffer
var nn int64 // written
var err error // error
var x int // read
// 1:1 reads and writes
for err == nil {
x, err = r.Read(w.buf)
if x > 0 {
n, werr := w.w.Write(w.buf[:x])
nn += int64(n)
if err != nil {
if n < x && n > 0 {
w.pushback(n - x)
}
return nn, werr
}
if n < x {
w.pushback(n - x)
return nn, io.ErrShortWrite
}
} else if err == nil {
err = io.ErrNoProgress
break
}
}
if err != io.EOF {
return nn, err
}
// we only clear here
// because we are sure
// the writes have
// succeeded. otherwise,
// we retain the data in case
// future writes succeed.
w.buf = w.buf[0:0]
return nn, nil
}

View file

@ -1,5 +0,0 @@
// +build appengine
package fwd
func unsafestr(s string) []byte { return []byte(s) }

View file

@ -1,18 +0,0 @@
// +build !appengine
package fwd
import (
"reflect"
"unsafe"
)
// unsafe cast string as []byte
func unsafestr(b string) []byte {
l := len(b)
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: l,
Cap: l,
Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data,
}))
}

View file

@ -1,12 +0,0 @@
# Contributing
In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but rather to showcase some of the approaches to solving problems we have adopted.
Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines:
- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request.
- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license.
- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set.
- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out.
- "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc...
- "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc...

View file

@ -1,23 +0,0 @@
Copyright (c) 2016 SmartyStreets, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
NOTE: Various optional and subordinate components carry their own licensing
requirements and restrictions. Use of those components is subject to the terms
and conditions outlined in the respective license of each component.

View file

@ -1,575 +0,0 @@
# assertions
--
import "github.com/smartystreets/assertions"
Package assertions contains the implementations for all assertions which are
referenced in goconvey's `convey` package
(github.com/smartystreets/goconvey/convey) and gunit
(github.com/smartystreets/gunit) for use with the So(...) method. They can also
be used in traditional Go test functions and even in applications.
Many of the assertions lean heavily on work done by Aaron Jacobs in his
excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The
ShouldResemble assertion leans heavily on work done by Daniel Jacques in his
very helpful go-render library. (https://github.com/luci/go-render)
## Usage
#### func GoConveyMode
```go
func GoConveyMode(yes bool)
```
GoConveyMode provides control over JSON serialization of failures. When using
the assertions in this package from the convey package JSON results are very
helpful and can be rendered in a DIFF view. In that case, this function will be
called with a true value to enable the JSON serialization. By default, the
assertions in this package will not serialize a JSON result, making standalone
usage more convenient.
#### func ShouldAlmostEqual
```go
func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string
```
ShouldAlmostEqual makes sure that two parameters are close enough to being
equal. The acceptable delta may be specified with a third argument, or a very
small default delta will be used.
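To make the delta handling concrete, here is a small hedged sketch using the standalone `So` function documented further below; the `checkPi` helper is purely illustrative:
```go
package example

import (
	"log"

	"github.com/smartystreets/assertions"
)

// checkPi passes when approx is within 0.001 of the expected value; omitting
// the fourth argument would fall back to the very small default delta.
func checkPi(approx float64) {
	if ok, message := assertions.So(approx, assertions.ShouldAlmostEqual, 3.14159, 0.001); !ok {
		log.Println(message)
	}
}
```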
#### func ShouldBeBetween
```go
func ShouldBeBetween(actual interface{}, expected ...interface{}) string
```
ShouldBeBetween receives exactly three parameters: an actual value, a lower
bound, and an upper bound. It ensures that the actual value is between both
bounds (but not equal to either of them).
#### func ShouldBeBetweenOrEqual
```go
func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
```
ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a
lower bound, and an upper bound. It ensures that the actual value is between
both bounds or equal to one of them.
#### func ShouldBeBlank
```go
func ShouldBeBlank(actual interface{}, expected ...interface{}) string
```
ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal
to "".
#### func ShouldBeChronological
```go
func ShouldBeChronological(actual interface{}, expected ...interface{}) string
```
ShouldBeChronological receives a []time.Time slice and asserts that they are in
chronological order starting with the first time.Time as the earliest.
#### func ShouldBeEmpty
```go
func ShouldBeEmpty(actual interface{}, expected ...interface{}) string
```
ShouldBeEmpty receives a single parameter (actual) and determines whether or not
calling len(actual) would return `0`. It obeys the rules specified by the len
function for determining length: http://golang.org/pkg/builtin/#len
#### func ShouldBeFalse
```go
func ShouldBeFalse(actual interface{}, expected ...interface{}) string
```
ShouldBeFalse receives a single parameter and ensures that it is false.
#### func ShouldBeGreaterThan
```go
func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string
```
ShouldBeGreaterThan receives exactly two parameters and ensures that the first
is greater than the second.
#### func ShouldBeGreaterThanOrEqualTo
```go
func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string
```
ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that
the first is greater than or equal to the second.
#### func ShouldBeIn
```go
func ShouldBeIn(actual interface{}, expected ...interface{}) string
```
ShouldBeIn receives at least 2 parameters. The first is a proposed member of the
collection that is passed in either as the second parameter, or of the
collection that is comprised of all the remaining parameters. This assertion
ensures that the proposed member is in the collection (using ShouldEqual).
#### func ShouldBeLessThan
```go
func ShouldBeLessThan(actual interface{}, expected ...interface{}) string
```
ShouldBeLessThan receives exactly two parameters and ensures that the first is
less than the second.
#### func ShouldBeLessThanOrEqualTo
```go
func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string
```
ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the first is
less than or equal to the second.
#### func ShouldBeNil
```go
func ShouldBeNil(actual interface{}, expected ...interface{}) string
```
ShouldBeNil receives a single parameter and ensures that it is nil.
#### func ShouldBeTrue
```go
func ShouldBeTrue(actual interface{}, expected ...interface{}) string
```
ShouldBeTrue receives a single parameter and ensures that it is true.
#### func ShouldBeZeroValue
```go
func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string
```
ShouldBeZeroValue receives a single parameter and ensures that it is the Go
equivalent of the default value, or "zero" value.
#### func ShouldContain
```go
func ShouldContain(actual interface{}, expected ...interface{}) string
```
ShouldContain receives exactly two parameters. The first is a slice and the
second is a proposed member. Membership is determined using ShouldEqual.
#### func ShouldContainKey
```go
func ShouldContainKey(actual interface{}, expected ...interface{}) string
```
ShouldContainKey receives exactly two parameters. The first is a map and the
second is a proposed key. Keys are compared with a simple '=='.
#### func ShouldContainSubstring
```go
func ShouldContainSubstring(actual interface{}, expected ...interface{}) string
```
ShouldContainSubstring receives exactly 2 string parameters and ensures that the
first contains the second as a substring.
#### func ShouldEndWith
```go
func ShouldEndWith(actual interface{}, expected ...interface{}) string
```
ShouldEndWith receives exactly 2 string parameters and ensures that the first
ends with the second.
#### func ShouldEqual
```go
func ShouldEqual(actual interface{}, expected ...interface{}) string
```
ShouldEqual receives exactly two parameters and does an equality check.
#### func ShouldEqualTrimSpace
```go
func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string
```
ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the
first is equal to the second after removing all leading and trailing whitespace
using strings.TrimSpace(first).
#### func ShouldEqualWithout
```go
func ShouldEqualWithout(actual interface{}, expected ...interface{}) string
```
ShouldEqualWithout receives exactly 3 string parameters and ensures that the
first is equal to the second after removing all instances of the third from the
first using strings.Replace(first, third, "", -1).
#### func ShouldHappenAfter
```go
func ShouldHappenAfter(actual interface{}, expected ...interface{}) string
```
ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the
first happens after the second.
#### func ShouldHappenBefore
```go
func ShouldHappenBefore(actual interface{}, expected ...interface{}) string
```
ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the
first happens before the second.
#### func ShouldHappenBetween
```go
func ShouldHappenBetween(actual interface{}, expected ...interface{}) string
```
ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the
first happens between (not on) the second and third.
#### func ShouldHappenOnOrAfter
```go
func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string
```
ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that
the first happens on or after the second.
#### func ShouldHappenOnOrBefore
```go
func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string
```
ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that
the first happens on or before the second.
#### func ShouldHappenOnOrBetween
```go
func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string
```
ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that
the first happens between or on the second and third.
#### func ShouldHappenWithin
```go
func ShouldHappenWithin(actual interface{}, expected ...interface{}) string
```
ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
arguments) and asserts that the first time.Time happens within or on the
duration specified relative to the other time.Time.
#### func ShouldHaveLength
```go
func ShouldHaveLength(actual interface{}, expected ...interface{}) string
```
ShouldHaveLength receives 2 parameters. The first is a collection to check the
length of, the second being the expected length. It obeys the rules specified by
the len function for determining length: http://golang.org/pkg/builtin/#len
#### func ShouldHaveSameTypeAs
```go
func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string
```
ShouldHaveSameTypeAs receives exactly two parameters and compares their
underlying types for equality.
#### func ShouldImplement
```go
func ShouldImplement(actual interface{}, expectedList ...interface{}) string
```
ShouldImplement receives exactly two parameters and ensures that the first
implements the interface type of the second.
#### func ShouldNotAlmostEqual
```go
func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string
```
ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
#### func ShouldNotBeBetween
```go
func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string
```
ShouldNotBeBetween receives exactly three parameters: an actual value, a lower
bound, and an upper bound. It ensures that the actual value is NOT between both
bounds.
#### func ShouldNotBeBetweenOrEqual
```go
func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
```
ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a
lower bound, and an upper bound. It ensures that the actual value is nopt
between the bounds nor equal to either of them.
#### func ShouldNotBeBlank
```go
func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string
```
ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is NOT
equal to "".
#### func ShouldNotBeEmpty
```go
func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string
```
ShouldNotBeEmpty receives a single parameter (actual) and determines whether or
not calling len(actual) would return a value greater than zero. It obeys the
rules specified by the `len` function for determining length:
http://golang.org/pkg/builtin/#len
#### func ShouldNotBeIn
```go
func ShouldNotBeIn(actual interface{}, expected ...interface{}) string
```
ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of
the collection that is passed in either as the second parameter, or of the
collection that is comprised of all the remaining parameters. This assertion
ensures that the proposed member is NOT in the collection (using ShouldEqual).
#### func ShouldNotBeNil
```go
func ShouldNotBeNil(actual interface{}, expected ...interface{}) string
```
ShouldNotBeNil receives a single parameter and ensures that it is not nil.
#### func ShouldNotContain
```go
func ShouldNotContain(actual interface{}, expected ...interface{}) string
```
ShouldNotContain receives exactly two parameters. The first is a slice and the
second is a proposed member. Membership is determined using ShouldEqual.
#### func ShouldNotContainKey
```go
func ShouldNotContainKey(actual interface{}, expected ...interface{}) string
```
ShouldNotContainKey receives exactly two parameters. The first is a map and the
second is a proposed absent key. Keys are compared with a simple '=='.
#### func ShouldNotContainSubstring
```go
func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string
```
ShouldNotContainSubstring receives exactly 2 string parameters and ensures that
the first does NOT contain the second as a substring.
#### func ShouldNotEndWith
```go
func ShouldNotEndWith(actual interface{}, expected ...interface{}) string
```
ShouldNotEndWith receives exactly 2 string parameters and ensures that the first
does not end with the second.
#### func ShouldNotEqual
```go
func ShouldNotEqual(actual interface{}, expected ...interface{}) string
```
ShouldNotEqual receives exactly two parameters and does an inequality check.
#### func ShouldNotHappenOnOrBetween
```go
func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string
```
ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts
that the first does NOT happen between or on the second or third.
#### func ShouldNotHappenWithin
```go
func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string
```
ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
arguments) and asserts that the first time.Time does NOT happen within or on the
duration specified relative to the other time.Time.
#### func ShouldNotHaveSameTypeAs
```go
func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string
```
ShouldNotHaveSameTypeAs receives exactly two parameters and compares their
underlying types for inequality.
#### func ShouldNotImplement
```go
func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string
```
ShouldNotImplement receives exactly two parameters and ensures that the first
does NOT implement the interface type of the second.
#### func ShouldNotPanic
```go
func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string)
```
ShouldNotPanic receives a void, niladic function and expects to execute the
function without any panic.
#### func ShouldNotPanicWith
```go
func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string)
```
ShouldNotPanicWith receives a void, niladic function and expects to recover a
panic whose content differs from the second argument.
#### func ShouldNotPointTo
```go
func ShouldNotPointTo(actual interface{}, expected ...interface{}) string
```
ShouldNotPointTo receives exactly two parameters and checks to see that they
point to different addresses.
#### func ShouldNotResemble
```go
func ShouldNotResemble(actual interface{}, expected ...interface{}) string
```
ShouldNotResemble receives exactly two parameters and does an inverse deep equal
check (see reflect.DeepEqual)
#### func ShouldNotStartWith
```go
func ShouldNotStartWith(actual interface{}, expected ...interface{}) string
```
ShouldNotStartWith receives exactly 2 string parameters and ensures that the
first does not start with the second.
#### func ShouldPanic
```go
func ShouldPanic(actual interface{}, expected ...interface{}) (message string)
```
ShouldPanic receives a void, niladic function and expects to recover a panic.
#### func ShouldPanicWith
```go
func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string)
```
ShouldPanicWith receives a void, niladic function and expects to recover a panic
with the second argument as the content.
#### func ShouldPointTo
```go
func ShouldPointTo(actual interface{}, expected ...interface{}) string
```
ShouldPointTo receives exactly two parameters and checks to see that they point
to the same address.
#### func ShouldResemble
```go
func ShouldResemble(actual interface{}, expected ...interface{}) string
```
ShouldResemble receives exactly two parameters and does a deep equal check (see
reflect.DeepEqual)
#### func ShouldStartWith
```go
func ShouldStartWith(actual interface{}, expected ...interface{}) string
```
ShouldStartWith receives exactly 2 string parameters and ensures that the first
starts with the second.
#### func So
```go
func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string)
```
So is a convenience function (as opposed to an inconvenience function?) for
running assertions on arbitrary arguments in any context, be it for testing or
even application logging. It allows you to perform assertion-like behavior (and
get nicely formatted messages detailing discrepancies) but without the program
blowing up or panicking. All that is required is to import this package and call
`So` with one of the assertions exported by this package as the second
parameter. The first return parameter is a boolean indicating if the assertion
was true. The second return parameter is the well-formatted message showing why
an assertion was incorrect, or blank if the assertion was correct.
Example:
if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
log.Println(message)
}
#### type Assertion
```go
type Assertion struct {
}
```
#### func New
```go
func New(t testingT) *Assertion
```
New swallows the *testing.T struct and prints failed assertions using t.Error.
Example: assertions.New(t).So(1, should.Equal, 1)
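A slightly fuller, hypothetical test showing the same pattern with assertions from this package (rather than the separate `should` helper package):
```go
package example

import (
	"testing"

	"github.com/smartystreets/assertions"
)

func TestGreeting(t *testing.T) {
	a := assertions.New(t)
	a.So("hello", assertions.ShouldEqual, "hello")
	a.So(len("hello"), assertions.ShouldBeGreaterThan, 3)
	if a.Failed() {
		t.Log("at least one of the assertions above failed")
	}
}
```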
#### func (*Assertion) Failed
```go
func (this *Assertion) Failed() bool
```
Failed reports whether any calls to So (on this Assertion instance) have failed.
#### func (*Assertion) So
```go
func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool
```
So calls the standalone So function and additionally, calls t.Error in failure
scenarios.
#### type FailureView
```go
type FailureView struct {
Message string `json:"Message"`
Expected string `json:"Expected"`
Actual string `json:"Actual"`
}
```
This struct is also declared in
github.com/smartystreets/goconvey/convey/reporting. The json struct tags should
be equal in both declarations.
#### type Serializer
```go
type Serializer interface {
// contains filtered or unexported methods
}
```

View file

@ -1,244 +0,0 @@
package assertions
import (
"fmt"
"reflect"
"github.com/smartystreets/assertions/internal/oglematchers"
)
// ShouldContain receives exactly two parameters. The first is a slice and the
// second is a proposed member. Membership is determined using ShouldEqual.
func ShouldContain(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
}
if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil {
typeName := reflect.TypeOf(actual)
if fmt.Sprintf("%v", matchError) == "which is not a slice or array" {
return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName)
}
return fmt.Sprintf(shouldHaveContained, typeName, expected[0])
}
return success
}
// ShouldNotContain receives exactly two parameters. The first is a slice and the
// second is a proposed member. Membership is determined using ShouldEqual.
func ShouldNotContain(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
}
typeName := reflect.TypeOf(actual)
if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil {
if fmt.Sprintf("%v", matchError) == "which is not a slice or array" {
return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName)
}
return success
}
return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0])
}
// ShouldContainKey receives exactly two parameters. The first is a map and the
// second is a proposed key. Keys are compared with a simple '=='.
func ShouldContainKey(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
}
keys, isMap := mapKeys(actual)
if !isMap {
return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual))
}
if !keyFound(keys, expected[0]) {
return fmt.Sprintf(shouldHaveContainedKey, reflect.TypeOf(actual), expected)
}
return ""
}
// ShouldNotContainKey receives exactly two parameters. The first is a map and the
// second is a proposed absent key. Keys are compared with a simple '=='.
func ShouldNotContainKey(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
}
keys, isMap := mapKeys(actual)
if !isMap {
return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual))
}
if keyFound(keys, expected[0]) {
return fmt.Sprintf(shouldNotHaveContainedKey, reflect.TypeOf(actual), expected)
}
return ""
}
func mapKeys(m interface{}) ([]reflect.Value, bool) {
value := reflect.ValueOf(m)
if value.Kind() != reflect.Map {
return nil, false
}
return value.MapKeys(), true
}
func keyFound(keys []reflect.Value, expectedKey interface{}) bool {
found := false
for _, key := range keys {
if key.Interface() == expectedKey {
found = true
}
}
return found
}
// ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection
// that is passed in either as the second parameter, or of the collection that is comprised
// of all the remaining parameters. This assertion ensures that the proposed member is in
// the collection (using ShouldEqual).
func ShouldBeIn(actual interface{}, expected ...interface{}) string {
if fail := atLeast(1, expected); fail != success {
return fail
}
if len(expected) == 1 {
return shouldBeIn(actual, expected[0])
}
return shouldBeIn(actual, expected)
}
func shouldBeIn(actual interface{}, expected interface{}) string {
if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil {
return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected))
}
return success
}
// ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection
// that is passed in either as the second parameter, or of the collection that is comprised
// of all the remaining parameters. This assertion ensures that the proposed member is NOT in
// the collection (using ShouldEqual).
func ShouldNotBeIn(actual interface{}, expected ...interface{}) string {
if fail := atLeast(1, expected); fail != success {
return fail
}
if len(expected) == 1 {
return shouldNotBeIn(actual, expected[0])
}
return shouldNotBeIn(actual, expected)
}
func shouldNotBeIn(actual interface{}, expected interface{}) string {
if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil {
return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected))
}
return success
}
// ShouldBeEmpty receives a single parameter (actual) and determines whether or not
// calling len(actual) would return `0`. It obeys the rules specified by the len
// function for determining length: http://golang.org/pkg/builtin/#len
func ShouldBeEmpty(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
}
if actual == nil {
return success
}
value := reflect.ValueOf(actual)
switch value.Kind() {
case reflect.Slice:
if value.Len() == 0 {
return success
}
case reflect.Chan:
if value.Len() == 0 {
return success
}
case reflect.Map:
if value.Len() == 0 {
return success
}
case reflect.String:
if value.Len() == 0 {
return success
}
case reflect.Ptr:
elem := value.Elem()
kind := elem.Kind()
if (kind == reflect.Slice || kind == reflect.Array) && elem.Len() == 0 {
return success
}
}
return fmt.Sprintf(shouldHaveBeenEmpty, actual)
}
// ShouldNotBeEmpty receives a single parameter (actual) and determines whether or not
// calling len(actual) would return a value greater than zero. It obeys the rules
// specified by the `len` function for determining length: http://golang.org/pkg/builtin/#len
func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
}
if empty := ShouldBeEmpty(actual, expected...); empty != success {
return success
}
return fmt.Sprintf(shouldNotHaveBeenEmpty, actual)
}
// ShouldHaveLength receives 2 parameters. The first is a collection to check
// the length of, the second being the expected length. It obeys the rules
// specified by the len function for determining length:
// http://golang.org/pkg/builtin/#len
func ShouldHaveLength(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
}
var expectedLen int64
lenValue := reflect.ValueOf(expected[0])
switch lenValue.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
expectedLen = lenValue.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
expectedLen = int64(lenValue.Uint())
default:
return fmt.Sprintf(shouldHaveBeenAValidInteger, reflect.TypeOf(expected[0]))
}
if expectedLen < 0 {
return fmt.Sprintf(shouldHaveBeenAValidLength, expected[0])
}
value := reflect.ValueOf(actual)
switch value.Kind() {
case reflect.Slice,
reflect.Chan,
reflect.Map,
reflect.String:
if int64(value.Len()) == expectedLen {
return success
} else {
return fmt.Sprintf(shouldHaveHadLength, actual, value.Len(), expectedLen)
}
case reflect.Ptr:
elem := value.Elem()
kind := elem.Kind()
if kind == reflect.Slice || kind == reflect.Array {
if int64(elem.Len()) == expectedLen {
return success
} else {
return fmt.Sprintf(shouldHaveHadLength, actual, elem.Len(), expectedLen)
}
}
}
return fmt.Sprintf(shouldHaveBeenAValidCollection, reflect.TypeOf(actual))
}
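
A brief, hypothetical usage sketch for the collection assertions defined in this file, driven through an `Assertion` created with `New`; the test and its data are illustrative only:

```go
package example

import (
	"testing"

	"github.com/smartystreets/assertions"
)

func TestBackendNames(t *testing.T) {
	a := assertions.New(t)
	backends := []string{"memory", "mongodb", "maildir"}

	a.So(backends, assertions.ShouldContain, "mongodb")
	a.So(backends, assertions.ShouldNotContain, "postgres")
	a.So(backends, assertions.ShouldHaveLength, 3)
	a.So(map[string]int{"memory": 1}, assertions.ShouldContainKey, "memory")
}
```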

View file

@ -1,105 +0,0 @@
// Package assertions contains the implementations for all assertions which
// are referenced in goconvey's `convey` package
// (github.com/smartystreets/goconvey/convey) and gunit (github.com/smartystreets/gunit)
// for use with the So(...) method.
// They can also be used in traditional Go test functions and even in
// applications.
//
// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library.
// (https://github.com/jacobsa/oglematchers)
// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library.
// (https://github.com/luci/go-render)
package assertions
import (
"fmt"
"runtime"
)
// By default we use a no-op serializer. The actual Serializer provides a JSON
// representation of failure results on selected assertions so the goconvey
// web UI can display a convenient diff.
var serializer Serializer = new(noopSerializer)
// GoConveyMode provides control over JSON serialization of failures. When
// using the assertions in this package from the convey package JSON results
// are very helpful and can be rendered in a DIFF view. In that case, this function
// will be called with a true value to enable the JSON serialization. By default,
// the assertions in this package will not serialize a JSON result, making
// standalone usage more convenient.
func GoConveyMode(yes bool) {
if yes {
serializer = newSerializer()
} else {
serializer = new(noopSerializer)
}
}
type testingT interface {
Error(args ...interface{})
}
type Assertion struct {
t testingT
failed bool
}
// New swallows the *testing.T struct and prints failed assertions using t.Error.
// Example: assertions.New(t).So(1, should.Equal, 1)
func New(t testingT) *Assertion {
return &Assertion{t: t}
}
// Failed reports whether any calls to So (on this Assertion instance) have failed.
func (this *Assertion) Failed() bool {
return this.failed
}
// So calls the standalone So function and additionally, calls t.Error in failure scenarios.
func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool {
ok, result := So(actual, assert, expected...)
if !ok {
this.failed = true
_, file, line, _ := runtime.Caller(1)
this.t.Error(fmt.Sprintf("\n%s:%d\n%s", file, line, result))
}
return ok
}
// So is a convenience function (as opposed to an inconvenience function?)
// for running assertions on arbitrary arguments in any context, be it for testing or even
// application logging. It allows you to perform assertion-like behavior (and get nicely
// formatted messages detailing discrepancies) but without the program blowing up or panicking.
// All that is required is to import this package and call `So` with one of the assertions
// exported by this package as the second parameter.
// The first return parameter is a boolean indicating if the assertion was true. The second
// return parameter is the well-formatted message showing why an assertion was incorrect, or
// blank if the assertion was correct.
//
// Example:
//
// if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
// log.Println(message)
// }
//
func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) {
if result := so(actual, assert, expected...); len(result) == 0 {
return true, result
} else {
return false, result
}
}
// so is like So, except that it only returns the string message, which is blank if the
// assertion passed. Used to facilitate testing.
func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string {
return assert(actual, expected...)
}
// assertion is an alias for a function with a signature that the So()
// function can handle. Any future or custom assertions should conform to this
// method signature. The return value should be an empty string if the assertion
// passes and a well-formed failure message if not.
type assertion func(actual interface{}, expected ...interface{}) string
////////////////////////////////////////////////////////////////////////////

View file

@ -1,280 +0,0 @@
package assertions
import (
"errors"
"fmt"
"math"
"reflect"
"strings"
"github.com/smartystreets/assertions/internal/go-render/render"
"github.com/smartystreets/assertions/internal/oglematchers"
)
// default acceptable delta for ShouldAlmostEqual
const defaultDelta = 0.0000000001
// ShouldEqual receives exactly two parameters and does an equality check.
func ShouldEqual(actual interface{}, expected ...interface{}) string {
if message := need(1, expected); message != success {
return message
}
return shouldEqual(actual, expected[0])
}
func shouldEqual(actual, expected interface{}) (message string) {
defer func() {
if r := recover(); r != nil {
message = serializer.serialize(expected, actual, fmt.Sprintf(shouldHaveBeenEqual, expected, actual))
return
}
}()
if matchError := oglematchers.Equals(expected).Matches(actual); matchError != nil {
expectedSyntax := fmt.Sprintf("%v", expected)
actualSyntax := fmt.Sprintf("%v", actual)
if expectedSyntax == actualSyntax && reflect.TypeOf(expected) != reflect.TypeOf(actual) {
message = fmt.Sprintf(shouldHaveBeenEqualTypeMismatch, expected, expected, actual, actual)
} else {
message = fmt.Sprintf(shouldHaveBeenEqual, expected, actual)
}
message = serializer.serialize(expected, actual, message)
return
}
return success
}
// ShouldNotEqual receives exactly two parameters and does an inequality check.
func ShouldNotEqual(actual interface{}, expected ...interface{}) string {
if fail := need(1, expected); fail != success {
return fail
} else if ShouldEqual(actual, expected[0]) == success {
return fmt.Sprintf(shouldNotHaveBeenEqual, actual, expected[0])
}
return success
}
// ShouldAlmostEqual makes sure that two parameters are close enough to being equal.
// The acceptable delta may be specified with a third argument,
// or a very small default delta will be used.
func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string {
actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...)
if err != "" {
return err
}
if math.Abs(actualFloat-expectedFloat) <= deltaFloat {
return success
} else {
return fmt.Sprintf(shouldHaveBeenAlmostEqual, actualFloat, expectedFloat)
}
}
// ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string {
actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...)
if err != "" {
return err
}
if math.Abs(actualFloat-expectedFloat) > deltaFloat {
return success
} else {
return fmt.Sprintf(shouldHaveNotBeenAlmostEqual, actualFloat, expectedFloat)
}
}
func cleanAlmostEqualInput(actual interface{}, expected ...interface{}) (float64, float64, float64, string) {
deltaFloat := 0.0000000001
if len(expected) == 0 {
return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided neither)"
} else if len(expected) == 2 {
delta, err := getFloat(expected[1])
if err != nil {
return 0.0, 0.0, 0.0, "delta must be a numerical type"
}
deltaFloat = delta
} else if len(expected) > 2 {
return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided more values)"
}
actualFloat, err := getFloat(actual)
if err != nil {
return 0.0, 0.0, 0.0, err.Error()
}
expectedFloat, err := getFloat(expected[0])
if err != nil {
return 0.0, 0.0, 0.0, err.Error()
}
return actualFloat, expectedFloat, deltaFloat, ""
}
// returns the float value of any real number, or error if it is not a numerical type
func getFloat(num interface{}) (float64, error) {
numValue := reflect.ValueOf(num)
numKind := numValue.Kind()
if numKind == reflect.Int ||
numKind == reflect.Int8 ||
numKind == reflect.Int16 ||
numKind == reflect.Int32 ||
numKind == reflect.Int64 {
return float64(numValue.Int()), nil
} else if numKind == reflect.Uint ||
numKind == reflect.Uint8 ||
numKind == reflect.Uint16 ||
numKind == reflect.Uint32 ||
numKind == reflect.Uint64 {
return float64(numValue.Uint()), nil
} else if numKind == reflect.Float32 ||
numKind == reflect.Float64 {
return numValue.Float(), nil
} else {
return 0.0, errors.New("must be a numerical type, but was " + numKind.String())
}
}
// ShouldResemble receives exactly two parameters and does a deep equal check (see reflect.DeepEqual)
func ShouldResemble(actual interface{}, expected ...interface{}) string {
if message := need(1, expected); message != success {
return message
}
if matchError := oglematchers.DeepEquals(expected[0]).Matches(actual); matchError != nil {
return serializer.serializeDetailed(expected[0], actual,
fmt.Sprintf(shouldHaveResembled, render.Render(expected[0]), render.Render(actual)))
}
return success
}
// ShouldNotResemble receives exactly two parameters and does an inverse deep equal check (see reflect.DeepEqual)
func ShouldNotResemble(actual interface{}, expected ...interface{}) string {
if message := need(1, expected); message != success {
return message
} else if ShouldResemble(actual, expected[0]) == success {
return fmt.Sprintf(shouldNotHaveResembled, render.Render(actual), render.Render(expected[0]))
}
return success
}
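A brief example of the deep-equality pair above: distinct values with identical contents resemble each other, while any difference produces a rendered failure message (import path assumed as before).

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

type point struct{ X, Y int }

func main() {
	a := point{1, 2}
	b := point{1, 2}

	fmt.Println(assertions.ShouldResemble(a, b) == "")               // true: deep-equal contents
	fmt.Println(assertions.ShouldNotResemble(a, point{1, 3}) == "") // true: the values differ
}
```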
// ShouldPointTo receives exactly two parameters and checks to see that they point to the same address.
func ShouldPointTo(actual interface{}, expected ...interface{}) string {
if message := need(1, expected); message != success {
return message
}
return shouldPointTo(actual, expected[0])
}
func shouldPointTo(actual, expected interface{}) string {
actualValue := reflect.ValueOf(actual)
expectedValue := reflect.ValueOf(expected)
if ShouldNotBeNil(actual) != success {
return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "nil")
} else if ShouldNotBeNil(expected) != success {
return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "nil")
} else if actualValue.Kind() != reflect.Ptr {
return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "not")
} else if expectedValue.Kind() != reflect.Ptr {
return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "not")
} else if ShouldEqual(actualValue.Pointer(), expectedValue.Pointer()) != success {
actualAddress := reflect.ValueOf(actual).Pointer()
expectedAddress := reflect.ValueOf(expected).Pointer()
return serializer.serialize(expectedAddress, actualAddress, fmt.Sprintf(shouldHavePointedTo,
actual, actualAddress,
expected, expectedAddress))
}
return success
}
// ShouldNotPointTo receives exactly two parameters and checks to see that they point to different addresses.
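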
func ShouldNotPointTo(actual interface{}, expected ...interface{}) string {
if message := need(1, expected); message != success {
return message
}
compare := ShouldPointTo(actual, expected[0])
if strings.HasPrefix(compare, shouldBePointers) {
return compare
} else if compare == success {
return fmt.Sprintf(shouldNotHavePointedTo, actual, expected[0], reflect.ValueOf(actual).Pointer())
}
return success
}
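The pointer assertions compare addresses rather than contents; a small sketch under the same assumed import path:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

type box struct{ N int }

func main() {
	b := box{N: 42}
	p1, p2 := &b, &b  // same address
	p3 := &box{N: 42} // equal contents, different address

	fmt.Println(assertions.ShouldPointTo(p1, p2) == "")    // true
	fmt.Println(assertions.ShouldNotPointTo(p1, p3) == "") // true
}
```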
// ShouldBeNil receives a single parameter and ensures that it is nil.
func ShouldBeNil(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
} else if actual == nil {
return success
} else if interfaceHasNilValue(actual) {
return success
}
return fmt.Sprintf(shouldHaveBeenNil, actual)
}
func interfaceHasNilValue(actual interface{}) bool {
value := reflect.ValueOf(actual)
kind := value.Kind()
nilable := kind == reflect.Slice ||
kind == reflect.Chan ||
kind == reflect.Func ||
kind == reflect.Ptr ||
kind == reflect.Map
// Careful: reflect.Value.IsNil() will panic unless it's an interface, chan, map, func, slice, or ptr
// Reference: http://golang.org/pkg/reflect/#Value.IsNil
return nilable && value.IsNil()
}
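interfaceHasNilValue is what lets ShouldBeNil treat a typed nil pointer as nil even though the plain interface comparison `actual == nil` fails for it; a short illustration (import path assumed as above):

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	var p *int // typed nil pointer

	var i interface{} = p
	fmt.Println(i == nil) // false: the interface holds a (*int, nil) pair

	// ShouldBeNil reflects on the underlying value, so it still succeeds.
	fmt.Println(assertions.ShouldBeNil(p) == "") // true
}
```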
// ShouldNotBeNil receives a single parameter and ensures that it is not nil.
func ShouldNotBeNil(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
} else if ShouldBeNil(actual) == success {
return fmt.Sprintf(shouldNotHaveBeenNil, actual)
}
return success
}
// ShouldBeTrue receives a single parameter and ensures that it is true.
func ShouldBeTrue(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
} else if actual != true {
return fmt.Sprintf(shouldHaveBeenTrue, actual)
}
return success
}
// ShouldBeFalse receives a single parameter and ensures that it is false.
func ShouldBeFalse(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
} else if actual != false {
return fmt.Sprintf(shouldHaveBeenFalse, actual)
}
return success
}
// ShouldBeZeroValue receives a single parameter and ensures that it is
// the Go equivalent of the default value, or "zero" value.
func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string {
if fail := need(0, expected); fail != success {
return fail
}
zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface()
if !reflect.DeepEqual(zeroVal, actual) {
return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldHaveBeenZeroValue, actual))
}
return success
}
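ShouldBeZeroValue compares against reflect.Zero of the argument's own type, so it works for numbers, strings, and structs alike; a quick sketch (import path assumed as above; note that an untyped nil argument would make reflect.TypeOf return nil and reflect.Zero panic):

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

type config struct {
	Name  string
	Count int
}

func main() {
	fmt.Println(assertions.ShouldBeZeroValue(0) == "")        // true
	fmt.Println(assertions.ShouldBeZeroValue("") == "")       // true
	fmt.Println(assertions.ShouldBeZeroValue(config{}) == "") // true
	fmt.Println(assertions.ShouldBeZeroValue(3) == "")        // false: non-zero int
}
```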

View file

@ -1,23 +0,0 @@
package assertions
import "fmt"
const (
success = ""
needExactValues = "This assertion requires exactly %d comparison values (you provided %d)."
needNonEmptyCollection = "This assertion requires at least 1 comparison value (you provided 0)."
)
func need(needed int, expected []interface{}) string {
if len(expected) != needed {
return fmt.Sprintf(needExactValues, needed, len(expected))
}
return success
}
func atLeast(minimum int, expected []interface{}) string {
if len(expected) < minimum {
return needNonEmptyCollection
}
return success
}

View file

@ -1,27 +0,0 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,477 +0,0 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package render
import (
"bytes"
"fmt"
"reflect"
"sort"
"strconv"
)
var builtinTypeMap = map[reflect.Kind]string{
reflect.Bool: "bool",
reflect.Complex128: "complex128",
reflect.Complex64: "complex64",
reflect.Float32: "float32",
reflect.Float64: "float64",
reflect.Int16: "int16",
reflect.Int32: "int32",
reflect.Int64: "int64",
reflect.Int8: "int8",
reflect.Int: "int",
reflect.String: "string",
reflect.Uint16: "uint16",
reflect.Uint32: "uint32",
reflect.Uint64: "uint64",
reflect.Uint8: "uint8",
reflect.Uint: "uint",
reflect.Uintptr: "uintptr",
}
var builtinTypeSet = map[string]struct{}{}
func init() {
for _, v := range builtinTypeMap {
builtinTypeSet[v] = struct{}{}
}
}
var typeOfString = reflect.TypeOf("")
var typeOfInt = reflect.TypeOf(int(1))
var typeOfUint = reflect.TypeOf(uint(1))
var typeOfFloat = reflect.TypeOf(10.1)
// Render converts a structure to a string representation. Unlike the "%#v"
// format string, this resolves pointer types' contents in structs, maps, and
// slices/arrays and prints their field values.
func Render(v interface{}) string {
buf := bytes.Buffer{}
s := (*traverseState)(nil)
s.render(&buf, 0, reflect.ValueOf(v), false)
return buf.String()
}
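To make the difference from the standard %#v verb concrete, here is a hedged sketch; the import path is inferred from the vendored directory layout and may differ.

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/go-render/render"
)

type node struct {
	Name string
	Next *node
}

func main() {
	leaf := &node{Name: "leaf"}
	root := node{Name: "root", Next: leaf}

	fmt.Printf("%#v\n", root)        // Next appears as a raw pointer address
	fmt.Println(render.Render(root)) // Next's fields are rendered, roughly:
	// main.node{Name:"root", Next:(*main.node){Name:"leaf", Next:(*main.node)(nil)}}
}
```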
// renderPointer is called to render a pointer value.
//
// This is overridable so that the test suite can have deterministic pointer
// values in its expectations.
var renderPointer = func(buf *bytes.Buffer, p uintptr) {
fmt.Fprintf(buf, "0x%016x", p)
}
// traverseState is used to note and avoid recursion as struct members are being
// traversed.
//
// traverseState is allowed to be nil. Specifically, the root state is nil.
type traverseState struct {
parent *traverseState
ptr uintptr
}
func (s *traverseState) forkFor(ptr uintptr) *traverseState {
for cur := s; cur != nil; cur = cur.parent {
if ptr == cur.ptr {
return nil
}
}
fs := &traverseState{
parent: s,
ptr: ptr,
}
return fs
}
func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value, implicit bool) {
if v.Kind() == reflect.Invalid {
buf.WriteString("nil")
return
}
vt := v.Type()
// If the type being rendered is a potentially recursive type (a type that
// can contain itself as a member), we need to avoid recursion.
//
// If we've already seen this type before, mark that this is the case and
// write a recursion placeholder instead of actually rendering it.
//
// If we haven't seen it before, fork our `seen` tracking so any higher-up
// renderers will also render it at least once, then mark that we've seen it
// to avoid recursing on lower layers.
pe := uintptr(0)
vk := vt.Kind()
switch vk {
case reflect.Ptr:
// Since structs and arrays aren't pointers, they can't directly be
// recursed, but they can contain pointers to themselves. Record their
// pointer to avoid this.
switch v.Elem().Kind() {
case reflect.Struct, reflect.Array:
pe = v.Pointer()
}
case reflect.Slice, reflect.Map:
pe = v.Pointer()
}
if pe != 0 {
s = s.forkFor(pe)
if s == nil {
buf.WriteString("<REC(")
if !implicit {
writeType(buf, ptrs, vt)
}
buf.WriteString(")>")
return
}
}
isAnon := func(t reflect.Type) bool {
if t.Name() != "" {
if _, ok := builtinTypeSet[t.Name()]; !ok {
return false
}
}
return t.Kind() != reflect.Interface
}
switch vk {
case reflect.Struct:
if !implicit {
writeType(buf, ptrs, vt)
}
structAnon := vt.Name() == ""
buf.WriteRune('{')
for i := 0; i < vt.NumField(); i++ {
if i > 0 {
buf.WriteString(", ")
}
anon := structAnon && isAnon(vt.Field(i).Type)
if !anon {
buf.WriteString(vt.Field(i).Name)
buf.WriteRune(':')
}
s.render(buf, 0, v.Field(i), anon)
}
buf.WriteRune('}')
case reflect.Slice:
if v.IsNil() {
if !implicit {
writeType(buf, ptrs, vt)
buf.WriteString("(nil)")
} else {
buf.WriteString("nil")
}
return
}
fallthrough
case reflect.Array:
if !implicit {
writeType(buf, ptrs, vt)
}
anon := vt.Name() == "" && isAnon(vt.Elem())
buf.WriteString("{")
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteString(", ")
}
s.render(buf, 0, v.Index(i), anon)
}
buf.WriteRune('}')
case reflect.Map:
if !implicit {
writeType(buf, ptrs, vt)
}
if v.IsNil() {
buf.WriteString("(nil)")
} else {
buf.WriteString("{")
mkeys := v.MapKeys()
tryAndSortMapKeys(vt, mkeys)
kt := vt.Key()
keyAnon := typeOfString.ConvertibleTo(kt) || typeOfInt.ConvertibleTo(kt) || typeOfUint.ConvertibleTo(kt) || typeOfFloat.ConvertibleTo(kt)
valAnon := vt.Name() == "" && isAnon(vt.Elem())
for i, mk := range mkeys {
if i > 0 {
buf.WriteString(", ")
}
s.render(buf, 0, mk, keyAnon)
buf.WriteString(":")
s.render(buf, 0, v.MapIndex(mk), valAnon)
}
buf.WriteRune('}')
}
case reflect.Ptr:
ptrs++
fallthrough
case reflect.Interface:
if v.IsNil() {
writeType(buf, ptrs, v.Type())
buf.WriteString("(nil)")
} else {
s.render(buf, ptrs, v.Elem(), false)
}
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
writeType(buf, ptrs, vt)
buf.WriteRune('(')
renderPointer(buf, v.Pointer())
buf.WriteRune(')')
default:
tstr := vt.String()
implicit = implicit || (ptrs == 0 && builtinTypeMap[vk] == tstr)
if !implicit {
writeType(buf, ptrs, vt)
buf.WriteRune('(')
}
switch vk {
case reflect.String:
fmt.Fprintf(buf, "%q", v.String())
case reflect.Bool:
fmt.Fprintf(buf, "%v", v.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
fmt.Fprintf(buf, "%d", v.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
fmt.Fprintf(buf, "%d", v.Uint())
case reflect.Float32, reflect.Float64:
fmt.Fprintf(buf, "%g", v.Float())
case reflect.Complex64, reflect.Complex128:
fmt.Fprintf(buf, "%g", v.Complex())
}
if !implicit {
buf.WriteRune(')')
}
}
}
func writeType(buf *bytes.Buffer, ptrs int, t reflect.Type) {
parens := ptrs > 0
switch t.Kind() {
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
parens = true
}
if parens {
buf.WriteRune('(')
for i := 0; i < ptrs; i++ {
buf.WriteRune('*')
}
}
switch t.Kind() {
case reflect.Ptr:
if ptrs == 0 {
// This pointer was referenced from within writeType (e.g., as part of
// rendering a list), and so hasn't had its pointer asterisk accounted
// for.
buf.WriteRune('*')
}
writeType(buf, 0, t.Elem())
case reflect.Interface:
if n := t.Name(); n != "" {
buf.WriteString(t.String())
} else {
buf.WriteString("interface{}")
}
case reflect.Array:
buf.WriteRune('[')
buf.WriteString(strconv.FormatInt(int64(t.Len()), 10))
buf.WriteRune(']')
writeType(buf, 0, t.Elem())
case reflect.Slice:
if t == reflect.SliceOf(t.Elem()) {
buf.WriteString("[]")
writeType(buf, 0, t.Elem())
} else {
// Custom slice type, use type name.
buf.WriteString(t.String())
}
case reflect.Map:
if t == reflect.MapOf(t.Key(), t.Elem()) {
buf.WriteString("map[")
writeType(buf, 0, t.Key())
buf.WriteRune(']')
writeType(buf, 0, t.Elem())
} else {
// Custom map type, use type name.
buf.WriteString(t.String())
}
default:
buf.WriteString(t.String())
}
if parens {
buf.WriteRune(')')
}
}
type cmpFn func(a, b reflect.Value) int
type sortableValueSlice struct {
cmp cmpFn
elements []reflect.Value
}
func (s sortableValueSlice) Len() int {
return len(s.elements)
}
func (s sortableValueSlice) Less(i, j int) bool {
return s.cmp(s.elements[i], s.elements[j]) < 0
}
func (s sortableValueSlice) Swap(i, j int) {
s.elements[i], s.elements[j] = s.elements[j], s.elements[i]
}
// cmpForType returns a cmpFn which sorts the data for some type t in the same
// order that a go-native map key is compared for equality.
func cmpForType(t reflect.Type) cmpFn {
switch t.Kind() {
case reflect.String:
return func(av, bv reflect.Value) int {
a, b := av.String(), bv.String()
if a < b {
return -1
} else if a > b {
return 1
}
return 0
}
case reflect.Bool:
return func(av, bv reflect.Value) int {
a, b := av.Bool(), bv.Bool()
if !a && b {
return -1
} else if a && !b {
return 1
}
return 0
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return func(av, bv reflect.Value) int {
a, b := av.Int(), bv.Int()
if a < b {
return -1
} else if a > b {
return 1
}
return 0
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64, reflect.Uintptr, reflect.UnsafePointer:
return func(av, bv reflect.Value) int {
a, b := av.Uint(), bv.Uint()
if a < b {
return -1
} else if a > b {
return 1
}
return 0
}
case reflect.Float32, reflect.Float64:
return func(av, bv reflect.Value) int {
a, b := av.Float(), bv.Float()
if a < b {
return -1
} else if a > b {
return 1
}
return 0
}
case reflect.Interface:
return func(av, bv reflect.Value) int {
a, b := av.InterfaceData(), bv.InterfaceData()
if a[0] < b[0] {
return -1
} else if a[0] > b[0] {
return 1
}
if a[1] < b[1] {
return -1
} else if a[1] > b[1] {
return 1
}
return 0
}
case reflect.Complex64, reflect.Complex128:
return func(av, bv reflect.Value) int {
a, b := av.Complex(), bv.Complex()
if real(a) < real(b) {
return -1
} else if real(a) > real(b) {
return 1
}
if imag(a) < imag(b) {
return -1
} else if imag(a) > imag(b) {
return 1
}
return 0
}
case reflect.Ptr, reflect.Chan:
return func(av, bv reflect.Value) int {
a, b := av.Pointer(), bv.Pointer()
if a < b {
return -1
} else if a > b {
return 1
}
return 0
}
case reflect.Struct:
cmpLst := make([]cmpFn, t.NumField())
for i := range cmpLst {
cmpLst[i] = cmpForType(t.Field(i).Type)
}
return func(a, b reflect.Value) int {
for i, cmp := range cmpLst {
if rslt := cmp(a.Field(i), b.Field(i)); rslt != 0 {
return rslt
}
}
return 0
}
}
return nil
}
func tryAndSortMapKeys(mt reflect.Type, k []reflect.Value) {
if cmp := cmpForType(mt.Key()); cmp != nil {
sort.Sort(sortableValueSlice{cmp, k})
}
}
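Because tryAndSortMapKeys orders keys with cmpForType, Render's map output is stable even though Go randomizes map iteration; a brief sketch (same assumed import path as above):

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/go-render/render"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Native iteration order varies between runs, but Render always prints:
	// map[string]int{"a":1, "b":2, "c":3}
	fmt.Println(render.Render(m))
}
```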

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,58 +0,0 @@
[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers)
`oglematchers` is a package for the Go programming language containing a set of
matchers, useful in a testing or mocking framework, inspired by and mostly
compatible with [Google Test][googletest] for C++ and
[Google JS Test][google-js-test]. The package is used by the
[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking
framework, which may be more directly useful to you, but can be generically used
elsewhere as well.
A "matcher" is simply an object with a `Matches` method defining a set of golang
values matched by the matcher, and a `Description` method describing that set.
For example, here are some matchers:
```go
// Numbers
Equals(17.13)
LessThan(19)
// Strings
Equals("taco")
HasSubstr("burrito")
MatchesRegex("t.*o")
// Combining matchers
AnyOf(LessThan(17), GreaterThan(19))
```
There are lots more; see [here][reference] for a reference. You can also add
your own simply by implementing the `oglematchers.Matcher` interface.
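As a rough sketch of writing a custom matcher: the method set below assumes the `Matcher` interface exposes `Matches(interface{}) error` and `Description() string`, which fits the README's description of a matcher but should be checked against the package's documentation.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

// isEven is a hypothetical matcher; a nil return from Matches means "matched".
type isEven struct{}

func (isEven) Matches(candidate interface{}) error {
	n, ok := candidate.(int)
	if !ok {
		return errors.New("which is not an int")
	}
	if n%2 != 0 {
		return errors.New("which is odd")
	}
	return nil
}

func (isEven) Description() string { return "is even" }

func main() {
	var m oglematchers.Matcher = isEven{}
	fmt.Println(m.Description()) // is even
	fmt.Println(m.Matches(4))    // <nil>
	fmt.Println(m.Matches(5))    // which is odd
}
```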
Installation
------------
First, make sure you have installed Go 1.0.2 or newer. See
[here][golang-install] for instructions.
Use the following command to install `oglematchers` and keep it up to date:
go get -u github.com/smartystreets/assertions/internal/oglematchers
Documentation
-------------
See [here][reference] for documentation. Alternatively, you can install the
package and then use `godoc`:
godoc github.com/smartystreets/assertions/internal/oglematchers
[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers
[golang-install]: http://golang.org/doc/install.html
[googletest]: http://code.google.com/p/googletest/
[google-js-test]: http://code.google.com/p/google-js-test/
[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest
[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock

Some files were not shown because too many files have changed in this diff.